#!/usr/bin/env python
"""
.. module:: converters
:synopsis: Converters for handling various types of content.
"""
import docutils
import docutils.core
import markdown
import os
content_types = ["markdown", "ReST"]
# Content converters.
def convertMDtoHTML(markdown_text):
return markdown.markdown(markdown_text)
def convertReSTtoHTML(rest_text):
    return docutils.core.publish_string(rest_text, writer_name='html')
def getHTMLConverter(content_type):
if (content_type == "markdown"):
return convertMDtoHTML
elif (content_type == "ReST"):
return convertReSTtoHTML
else:
raise ContentTypeException(content_type)
def getLinkConverter(content_type):
if (content_type == "markdown"):
return linkConverterMD
elif (content_type == "ReST"):
return linkConverterReST
else:
raise ContentTypeException(content_type)
def linkConverterMD(link_name, link_url, is_image):
extension = os.path.splitext(link_name)[1]
if is_image:
return ""
else:
return "[" + link_name + "](" + link_url + ")"
def linkConverterReST(link_name, link_url, is_image):
extension = os.path.splitext(link_name)[1]
if is_image:
return ".. figure:: " + link_url
else:
return "`" + link_name + " <" + link_url + ">`_"
class ContentTypeException(Exception):
def __init__(self, message):
message = "Error no converter exists for " + message
Exception.__init__(self, message)
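# Minimal usage sketch (an addition, not part of the original module): it
# assumes the markdown and docutils packages are installed and simply
# exercises the converter lookups defined above.
if __name__ == "__main__":
    to_html = getHTMLConverter("markdown")
    print(to_html("# Hello\n\nSome *markdown* text."))
    to_link = getLinkConverter("ReST")
    print(to_link("example", "https://example.com", False))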
|
python
|
import logging
def get_logger():
logger = logging.getLogger("ddns_server")
logger.setLevel(logging.INFO)
log_file = logging.FileHandler("log.log")
log_file.setLevel(logging.INFO)
log_stream = logging.StreamHandler()
log_stream.setLevel(logging.INFO)
formatter = logging.Formatter("[%(asctime)s] %(levelname)s : %(message)s")
log_file.setFormatter(formatter)
log_stream.setFormatter(formatter)
logger.addHandler(log_file)
logger.addHandler(log_stream)
return logger
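# Minimal usage sketch (an addition, not part of the original module):
# get_logger() attaches both a file handler and a stream handler, so messages
# go to log.log and to the console. Calling it repeatedly would attach
# duplicate handlers, so callers should reuse the returned logger.
if __name__ == "__main__":
    logger = get_logger()
    logger.info("ddns_server logging initialised")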
|
python
|
import imagehash
import pandas as pd
import os
from PIL import Image
from collections import defaultdict
from square_crop import square_crop
directory_path = '/Users/Ziyu/OneDrive - Clarivate Analytics/Desktop/lego_images/'
data_path = '/Users/Ziyu/OneDrive - Clarivate Analytics/Desktop/lego/image_hashes_2.csv'
data_dict = defaultdict(list)
for image_name in os.listdir(directory_path):
image_path = directory_path + image_name
with Image.open(image_path) as image:
image = square_crop(image)
ahash = imagehash.average_hash(image)
dhash = imagehash.dhash(image)
phash = imagehash.phash(image)
whash = imagehash.whash(image)
data_dict['image_name'].append(image_name)
data_dict['ahash'].append(ahash)
data_dict['dhash'].append(dhash)
data_dict['phash'].append(phash)
data_dict['whash'].append(whash)
print('Finished No. %s' % image_name)
data = pd.DataFrame(data_dict)
data.to_csv(data_path)
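# Hedged follow-up sketch (not part of the original script): one way the saved
# hashes could be compared afterwards. str() of an imagehash hash yields its
# hex form, imagehash.hex_to_hash() restores it, and subtracting two hashes
# gives their Hamming distance (a small distance suggests near-duplicates).
if len(data) >= 2:
    h1 = imagehash.hex_to_hash(str(data.loc[0, 'phash']))
    h2 = imagehash.hex_to_hash(str(data.loc[1, 'phash']))
    print('phash Hamming distance between first two images: %d' % (h1 - h2))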
|
python
|
#!/usr/bin/python
# Import the CGI module
import MySQLdb
import cgi
import cgitb
cgitb.enable()
# Test with: http://192.168.0.100:8000/cgi-bin/AddingDataXml.py?operanda=2&operandb=3&answer=5
# Required header that tells the browser how to render the HTML.
def getHeader():
return """Content-Type: text/xml\n
<?xml version='1.0'?>
<additions>"""
def getClose():
return "</additions>"
# Define function to generate HTML form.
def generate_form(operanda, operandb, answer):
xml = getHeader()
sql = "insert into additions (operanda, operandb, answer) values ({0}, {1}, {2});".format(operanda, operandb, answer)
insertData(sql)
xml += "<addition>"
xml += "<operanda>" + operanda + "</operanda>"
xml += "<operandb>" + operandb + "</operandb>"
xml += "<answer>" + answer + "</answer>"
xml += "</addition>"
xml += "</additions>"
print xml
def insertData(sql):
db = MySQLdb.connect(host="localhost", user="charlie", passwd="bar", db="charlie")
cursor = db.cursor()
cursor.execute(sql)
db.commit()
cursor.close()
db.close()
def generateError(error):
xml = getHeader()
xml += "<addition>" + str(error) + "</addition>"
xml += getClose()
    print xml
def runAction():
form = cgi.FieldStorage()
if (form.has_key("answer") and
form.has_key("operanda") and
form.has_key("operandb")):
generate_form(form["operanda"].value, form["operandb"].value, form["answer"].value)
else:
        generateError(form.list)
runAction()
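# Hedged alternative sketch (an addition, not part of the original script):
# insertData() above interpolates user input into SQL with str.format(), which
# is open to SQL injection. MySQLdb also accepts parameterized queries with
# %s placeholders, along these lines:
def insertDataSafe(operanda, operandb, answer):
    db = MySQLdb.connect(host="localhost", user="charlie", passwd="bar", db="charlie")
    cursor = db.cursor()
    cursor.execute(
        "insert into additions (operanda, operandb, answer) values (%s, %s, %s)",
        (operanda, operandb, answer))
    db.commit()
    cursor.close()
    db.close()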
|
python
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
from myuw.dao.registration import get_schedule_by_term
from myuw.dao.instructor_schedule import get_instructor_schedule_by_term
from myuw.dao.term import get_current_quarter, get_current_summer_term
from myuw.dao.building import get_building_by_code
from restclients_core.exceptions import DataFailureException
from dateutil.relativedelta import relativedelta
from datetime import timedelta
import math
import copy
logger = logging.getLogger(__name__)
def get_schedule_json(visual_schedule, term, summer_term=None):
response = {}
schedule_periods = []
period_id = 0
for period in visual_schedule:
period_data = period.json_data()
if period.meetings_trimmed_front:
period_data['disabled_days'] = \
_get_disabled_days(period.start_date, True)
if period.meetings_trimmed_back:
period_data['disabled_days'] = \
_get_disabled_days(period.end_date, False)
if period.is_finals:
period_data['id'] = 'finals'
else:
period_data['id'] = period_id
schedule_periods.append(period_data)
period_id += 1
for section in period_data["sections"]:
for meeting in section["meetings"]:
if 'building' in meeting:
building_code = meeting["building"]
building = get_building_by_code(building_code)
if building is not None:
meeting["latitude"] = building.latitude
meeting["longitude"] = building.longitude
response['periods'] = schedule_periods
# Add term data for schedule
response['term'] = {
'year': term.year,
'quarter': term.quarter,
'first_day_quarter': term.first_day_quarter,
'last_day_instruction': term.last_day_instruction,
'aterm_last_date': term.aterm_last_date,
'bterm_first_date': term.bterm_first_date,
'last_final_exam_date': term.last_final_exam_date,
'summer_term': summer_term
}
response['off_term_trimmed'] = _get_off_term_trimmed(visual_schedule)
return response
def _get_disabled_days(date, is_before):
disabled_days = {'sunday': False,
'monday': False,
'tuesday': False,
'wednesday': False,
'thursday': False,
'friday': False,
'saturday': False}
day_index = date.weekday()
if day_index == 6:
day_index = 0
else:
day_index += 1
if is_before:
if day_index > 0:
disabled_days['sunday'] = True
if day_index > 1:
disabled_days['monday'] = True
if day_index > 2:
disabled_days['tuesday'] = True
if day_index > 3:
disabled_days['wednesday'] = True
if day_index > 4:
disabled_days['thursday'] = True
if day_index > 5:
disabled_days['friday'] = True
else:
if day_index < 6:
disabled_days['saturday'] = True
if day_index < 5:
disabled_days['friday'] = True
if day_index < 4:
disabled_days['thursday'] = True
if day_index < 3:
disabled_days['wednesday'] = True
if day_index < 2:
disabled_days['tuesday'] = True
if day_index < 1:
disabled_days['monday'] = True
return disabled_days
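# Worked example (comments added for clarity): for a period starting on a
# Wednesday, date.weekday() is 2, so day_index becomes 3 (Sunday is remapped
# to 0 and the other days shift up by one). With is_before=True this disables
# Sunday, Monday and Tuesday, i.e. the days of that week before the period
# starts; with is_before=False the days after the period's end are disabled.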
def _get_off_term_trimmed(visual_schedule):
seen_sections = {}
trimmed_sections = []
for period in visual_schedule:
for section in period.sections:
if hasattr(section, 'real_end_date'):
section_slug = '{} {} {}'.format(section.curriculum_abbr,
section.course_number,
section.section_id)
seen_sections[section_slug] = section
for slug, section in seen_sections.items():
trimmed_sections.append({'section': slug,
'end_date': section.real_end_date})
return trimmed_sections
def get_future_visual_schedule(request, term, summer_term=None):
schedule = _get_combined_future_schedule(request, term, summer_term)
if schedule is None or len(schedule.sections) == 0:
return None
vs = get_visual_schedule_from_schedule(request, schedule, summer_term)
return vs
def get_current_visual_schedule(request):
schedule = _get_combined_schedule(request)
if schedule is None or len(schedule.sections) == 0:
return None, None, None
summer_term = None
if schedule.term.is_summer_quarter():
summer_term = schedule.summer_term
vs = get_visual_schedule_from_schedule(request, schedule, summer_term)
return vs, schedule.term, summer_term
def get_visual_schedule_from_schedule(request, schedule, summer_term):
visual_schedule = _get_visual_schedule_from_schedule(
schedule, request, summer_term)
if summer_term and _is_split_summer(schedule):
visual_schedule = _trim_summer_term(visual_schedule, summer_term)
return visual_schedule
def _get_combined_schedule(request):
try:
student_schedule = get_schedule_by_term(request)
_set_student_sections(student_schedule)
except DataFailureException:
student_schedule = None
try:
instructor_schedule = get_instructor_schedule_by_term(request)
_set_instructor_sections(instructor_schedule)
except DataFailureException:
instructor_schedule = None
return __combine_schedules(student_schedule, instructor_schedule)
def _get_combined_future_schedule(request, term, summer_term):
try:
student_schedule = get_schedule_by_term(
request, term=term, summer_term=summer_term)
_set_student_sections(student_schedule)
except DataFailureException:
student_schedule = None
try:
instructor_schedule = get_instructor_schedule_by_term(
request, term=term, summer_term=summer_term)
_set_instructor_sections(instructor_schedule)
except DataFailureException:
instructor_schedule = None
return __combine_schedules(student_schedule, instructor_schedule)
def __combine_schedules(student_schedule, instructor_schedule):
if (student_schedule is None or
len(student_schedule.sections) == 0):
return instructor_schedule
schedule = student_schedule
if instructor_schedule is not None:
schedule.sections += instructor_schedule.sections
return schedule
def _set_instructor_sections(instructor_schedule):
for section in instructor_schedule.sections:
section.is_teaching = True
return instructor_schedule
def _set_student_sections(student_schedule):
for section in student_schedule.sections:
section.is_teaching = False
return student_schedule
def _get_visual_schedule_from_schedule(schedule, request, summer_term):
# common courses default to term start/end dates
_add_dates_to_sections(schedule)
if _is_split_summer(schedule):
_adjust_off_term_dates(schedule)
a_bounds, b_bounds = get_summer_schedule_bounds(schedule)
a_weeks = _get_weeks_from_bounds(a_bounds)
for week in a_weeks:
week.summer_term = "A-term"
a_weeks = _add_sections_to_weeks(schedule.sections, a_weeks)
a_consolidated = _consolidate_weeks(a_weeks)
trim_summer_meetings(a_consolidated)
a_consolidated[-1].meetings_trimmed_back = True
b_weeks = _get_weeks_from_bounds(b_bounds)
for week in b_weeks:
week.summer_term = "B-term"
b_weeks = _add_sections_to_weeks(schedule.sections, b_weeks)
b_consolidated = _consolidate_weeks(b_weeks)
trim_summer_meetings(b_consolidated)
b_consolidated[0].meetings_trimmed_front = True
consolidated = a_consolidated + b_consolidated
else:
try:
# find sections beyond term
bounds = get_schedule_bounds(schedule)
weeks = _get_weeks_from_bounds(bounds)
weeks = _add_qtr_start_data_to_weeks(weeks, schedule)
weeks = _add_sections_to_weeks(schedule.sections, weeks)
weeks = trim_section_meetings(weeks)
weeks = trim_weeks_no_meetings(weeks)
consolidated = _consolidate_weeks(weeks)
except AttributeError:
return None
_add_weekend_meeting_data(consolidated)
consolidated = _remove_empty_periods(consolidated)
_adjust_period_dates(consolidated)
finals = _get_finals_period(schedule)
if len(finals.sections) > 0:
consolidated.append(finals)
return consolidated
def _add_qtr_start_data_to_weeks(weeks, schedule):
if schedule.term.quarter != "summer":
qtr_start = schedule.term.first_day_quarter
for week in weeks:
if week.start_date < qtr_start < week.end_date:
week.qtr_start = schedule.term.first_day_quarter
return weeks
def _remove_empty_periods(schedule):
periods = []
for period in schedule:
try:
if len(period.sections) > 0:
periods.append(period)
except AttributeError:
pass
return periods
def _adjust_off_term_dates(schedule):
qtr_end_date = schedule.term.last_day_instruction
for section in schedule.sections:
if section.end_date > qtr_end_date:
section.real_end_date = section.end_date
section.end_date = qtr_end_date
def _adjust_period_dates(schedule):
i = 0
for period in schedule:
i += 1
# modify start date
if period.qtr_start:
period.start_date = period.qtr_start
else:
if period.meetings_trimmed_front:
try:
new_start = _get_earliest_start_from_period(period)
period.start_date = new_start
except TypeError:
# section has no meetings, leave date alone
pass
if not period.meets_sunday and not period.meetings_trimmed_front:
period.start_date = period.start_date + timedelta(days=1)
# modify end date
if period.meetings_trimmed_back:
try:
new_end = _get_latest_end_from_period(period)
period.end_date = new_end
except TypeError:
# section has no meetings, leave date alone
pass
if not period.meets_saturday and not period.meetings_trimmed_back:
period.end_date = period.end_date - timedelta(days=1)
def _get_earliest_start_from_period(period):
"""
return the earliest date in the period
"""
earliest_meeting = None # week day
for section in period.sections:
for meeting in section.meetings:
if meeting.wont_meet():
# if a section has a NON mtg set start date to section start
return section.start_date
earliest_section_meeting = _get_earliest_meeting_day(meeting)
if earliest_section_meeting is not None:
if earliest_meeting is None:
earliest_meeting = earliest_section_meeting
elif earliest_section_meeting < earliest_meeting:
earliest_meeting = earliest_section_meeting
start_day = period.start_date.weekday()
# Treat sunday as 'first' day
if start_day == 6:
days_to_add = earliest_meeting + 1
else:
days_to_add = earliest_meeting - start_day
start_date = (period.start_date + timedelta(days=days_to_add))
return start_date
def _get_latest_end_from_period(period):
latest_meeting = None
for section in period.sections:
for meeting in section.meetings:
if meeting.wont_meet():
# if a section has a NON mtg set end date to section end
return section.end_date
latest_section_meeting = _get_latest_meeting_day(meeting)
if latest_meeting is None:
latest_meeting = latest_section_meeting
elif latest_meeting < latest_section_meeting:
latest_meeting = latest_section_meeting
end_day = period.end_date.weekday()
days_to_subtract = end_day - latest_meeting
end_date = period.end_date - timedelta(days=days_to_subtract)
return end_date
def _get_earliest_meeting_day(meeting):
day_index = None
if meeting.meets_saturday:
day_index = 5
if meeting.meets_friday:
day_index = 4
if meeting.meets_thursday:
day_index = 3
if meeting.meets_wednesday:
day_index = 2
if meeting.meets_tuesday:
day_index = 1
if meeting.meets_monday:
day_index = 0
if meeting.meets_sunday:
day_index = 6
return day_index
def _get_latest_meeting_day(meeting):
day_index = None
if meeting.meets_sunday:
day_index = 6
if meeting.meets_monday:
day_index = 0
if meeting.meets_tuesday:
day_index = 1
if meeting.meets_wednesday:
day_index = 2
if meeting.meets_thursday:
day_index = 3
if meeting.meets_friday:
day_index = 4
if meeting.meets_saturday:
day_index = 5
return day_index
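# Note (comments added for clarity): the two helpers above use the
# datetime.weekday() numbering (Monday=0 .. Saturday=5, Sunday=6), but Sunday
# is treated as the first day of the visual week. That is why Sunday is
# checked last in _get_earliest_meeting_day (so it wins as the earliest day)
# and first in _get_latest_meeting_day (so any other meeting day overrides it).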
def _get_finals_period(schedule):
finals = SchedulePeriod()
finals.is_finals = True
finals.sections = copy.deepcopy(schedule.sections)
return finals
def trim_weeks_no_meetings(weeks):
trimmed_weeks = copy.copy(weeks)
for week in weeks:
non_meeting_sections = []
for section in week.sections:
is_non_meeting = True
for meeting in section.meetings:
if meeting.wont_meet() or not meeting.no_meeting():
is_non_meeting = False
if is_non_meeting:
non_meeting_sections.append(section)
if len(non_meeting_sections) == len(week.sections):
trimmed_weeks.remove(week)
return trimmed_weeks
def trim_section_meetings(weeks):
for week in weeks:
front_trim_count = 0
back_trim_count = 0
for section in week.sections:
if section.start_date > week.start_date:
trimmed = _trim_section_before(section, section.start_date)
if trimmed:
front_trim_count += 1
if section.end_date < week.end_date:
trimmed = _trim_section_after(section, section.end_date)
if trimmed:
back_trim_count += 1
if front_trim_count > 0:
week.meetings_trimmed = True
week.meetings_trimmed_front = True
if back_trim_count > 0:
week.meetings_trimmed = True
week.meetings_trimmed_back = True
return weeks
def get_summer_schedule_bounds(schedule):
a_start = schedule.term.first_day_quarter
# set start to first sunday
    if int(a_start.strftime('%w')) != 0:
days_to_remove = int(a_start.strftime('%w'))
a_start -= relativedelta(days=days_to_remove)
b_end = schedule.term.last_day_instruction
# set end to last saturday
    if int(b_end.strftime('%w')) != 6:
days_to_add = 6 - int(b_end.strftime('%w'))
b_end += relativedelta(days=days_to_add)
a_bounds = a_start, schedule.term.aterm_last_date
b_bounds = schedule.term.bterm_first_date, b_end
return a_bounds, b_bounds
def trim_summer_meetings(weeks):
if weeks[0].summer_term == "A-term":
week_to_trim = weeks[-1]
week_to_trim.sections = _trim_sections_after(week_to_trim.sections,
week_to_trim.end_date)
if weeks[0].summer_term == "B-term":
week_to_trim = weeks[0]
week_to_trim.sections = _trim_sections_before(week_to_trim.sections,
week_to_trim.start_date)
return weeks
def _trim_sections_after(sections, date):
cutoff_day = int(date.strftime('%w'))
for section in sections:
if section.summer_term == "A-term" and section.end_date > date:
# preserve a-term course meetings that goes beyond term last day
continue
for meeting in section.meetings:
if cutoff_day <= 5:
meeting.meets_saturday = False
if cutoff_day <= 4:
meeting.meets_friday = False
if cutoff_day <= 3:
meeting.meets_thursday = False
if cutoff_day <= 2:
meeting.meets_wednesday = False
if cutoff_day <= 1:
meeting.meets_tuesday = False
if cutoff_day <= 0:
meeting.meets_monday = False
return sections
def _trim_sections_before(sections, date):
cutoff_day = int(date.strftime('%w'))
for section in sections:
if section.summer_term == "B-term" and section.start_date < date:
# preserve b-term course meetings that occurs before term 1st day
continue
for meeting in section.meetings:
if cutoff_day >= 1:
meeting.meets_sunday = False
if cutoff_day >= 2:
meeting.meets_monday = False
if cutoff_day >= 3:
meeting.meets_tuesday = False
if cutoff_day >= 4:
meeting.meets_wednesday = False
if cutoff_day >= 5:
meeting.meets_thursday = False
if cutoff_day >= 6:
meeting.meets_friday = False
return sections
def _trim_section_after(section, date):
cutoff_day = int(date.strftime('%w'))
trimmed = False
for meeting in section.meetings:
if cutoff_day <= 5:
if meeting.meets_saturday:
trimmed = True
meeting.meets_saturday = False
if cutoff_day <= 4:
if meeting.meets_friday:
trimmed = True
meeting.meets_friday = False
if cutoff_day <= 3:
if meeting.meets_thursday:
trimmed = True
meeting.meets_thursday = False
if cutoff_day <= 2:
if meeting.meets_wednesday:
trimmed = True
meeting.meets_wednesday = False
if cutoff_day <= 1:
if meeting.meets_tuesday:
trimmed = True
meeting.meets_tuesday = False
if cutoff_day <= 0:
if meeting.meets_monday:
trimmed = True
meeting.meets_monday = False
return trimmed
def _trim_section_before(section, date):
cutoff_day = int(date.strftime('%w'))
trimmed = False
for meeting in section.meetings:
if cutoff_day >= 1:
if meeting.meets_sunday:
trimmed = True
meeting.meets_sunday = False
if cutoff_day >= 2:
if meeting.meets_monday:
trimmed = True
meeting.meets_monday = False
if cutoff_day >= 3:
if meeting.meets_tuesday:
trimmed = True
meeting.meets_tuesday = False
if cutoff_day >= 4:
if meeting.meets_wednesday:
trimmed = True
meeting.meets_wednesday = False
if cutoff_day >= 5:
if meeting.meets_thursday:
trimmed = True
meeting.meets_thursday = False
if cutoff_day >= 6:
if meeting.meets_friday:
trimmed = True
meeting.meets_friday = False
return trimmed
def _is_split_summer(schedule):
if schedule.term.quarter != 'summer':
return False
    for section in schedule.sections:
        if section.summer_term != "Full-term":
            return True
    return False
def _add_weekend_meeting_data(weeks):
for week in weeks:
try:
for section in week.sections:
for meeting in section.meetings:
if meeting.meets_saturday:
week.meets_saturday = True
if meeting.meets_sunday:
week.meets_sunday = True
except AttributeError:
pass
return weeks
def _add_sections_to_weeks(sections, weeks):
for week in weeks:
for section in sections:
if section.start_date <= week.end_date \
and section.end_date >= week.start_date:
# make a copy of section as we'll modify meetings per week
week.sections.append(copy.deepcopy(section))
return weeks
def _consolidate_weeks(weeks):
consolidated_weeks = []
prev_week = None
for week in weeks:
if prev_week is None:
prev_week = week
else:
will_merge = True
# Don't merge last week of A-term
if week.summer_term == "A-term" \
and weeks.index(week) == len(weeks) - 1:
will_merge = False
# Don't merge 2nd week of B term with 1st
elif week.summer_term == "B-term" and weeks.index(week) == 1:
will_merge = False
else:
# Merge weeks with same sections
if _section_lists_are_same(prev_week.sections, week.sections):
will_merge = True
else:
will_merge = False
if week.meetings_trimmed or prev_week.meetings_trimmed:
will_merge = False
if will_merge:
prev_week.end_date = week.end_date
else:
consolidated_weeks.append(prev_week)
prev_week = week
# Append last week block
consolidated_weeks.append(prev_week)
return consolidated_weeks
def _section_lists_are_same(list1, list2):
    if len(list1) != len(list2):
return False
for l1_section in list1:
found_match = False
for l2_section in list2:
if _sections_are_same(l1_section, l2_section):
found_match = True
if not found_match:
return False
return True
def _sections_are_same(section1, section2):
return (section1.curriculum_abbr == section2.curriculum_abbr) \
and (section1.course_number == section2.course_number) \
and (section1.section_id == section2.section_id)
def _get_weeks_from_bounds(bounds):
start, end = bounds
periods = []
# weeks between start>end dates, including first day
schedule_length = math.ceil(((end-start).days + 1)/7.0)
while schedule_length > 0:
period = SchedulePeriod()
period.start_date = start
start_day = int(start.strftime('%w'))
end_offset = 6-start_day
end_date = (start + timedelta(days=end_offset))
# handle case where week ends midweek
if end_date > end:
end_date = end
period.end_date = end_date
periods.append(period)
next_start_offset = 7-start_day
start = (start + timedelta(days=next_start_offset))
schedule_length -= 1
return periods
def get_schedule_bounds(schedule):
start = None
end = None
for section in schedule.sections:
if start is None:
start = section.start_date
elif start > section.start_date:
start = section.start_date
if end is None:
end = section.end_date
elif end < section.end_date:
end = section.end_date
# set start to first sunday
if int(start.strftime('%w')) != 0:
days_to_remove = int(start.strftime('%w'))
start = start - relativedelta(days=days_to_remove)
# set end to last saturday
if int(end.strftime('%w')) != 6:
days_to_add = 6 - int(end.strftime('%w'))
end += relativedelta(days=days_to_add)
return start, end
def _add_dates_to_sections(schedule):
"""
Adds term start/end dates to sections that do not have them (ie non-PCE)
"""
for section in schedule.sections:
if section.start_date is None:
if section.summer_term == "B-term":
section.start_date = schedule.term.bterm_first_date
else:
section.start_date = schedule.term.first_day_quarter
if section.end_date is None:
if section.summer_term == "A-term":
section.end_date = schedule.term.aterm_last_date
else:
section.end_date = schedule.term.last_day_instruction
return schedule
def _trim_summer_term(schedule, summer_term):
term_periods = []
for period in schedule:
if period.summer_term is not None:
if period.summer_term.lower() == summer_term:
term_periods.append(period)
return term_periods
class SchedulePeriod():
def __init__(self):
self.start_date = None
self.end_date = None
self.sections = []
self.meets_saturday = False
self.meets_sunday = False
self.is_finals = False
self.qtr_start = None
# sections will be either A term OR B term, full term classes will
# be split into corresponding A and B term pieces
self.summer_term = None
self.meetings_trimmed = False
self.meetings_trimmed_front = False
self.meetings_trimmed_back = False
def json_data(self):
section_data = []
for section in self.sections:
section_json = section.json_data()
try:
section_json['color_id'] = section.color_id
except AttributeError:
pass
section_json['is_teaching'] = section.is_teaching
section_data.append(section_json)
data = {'start_date': self.start_date,
'end_date': self.end_date,
'meets_saturday': self.meets_saturday,
'meets_sunday': self.meets_sunday,
'sections': section_data}
return data
|
python
|
#coding:utf-8
import scapy_http.http as HTTP
from scapy.all import *
from scapy.error import Scapy_Exception
import time
import re
import os
from xmlrpclib import ServerProxy
count=0
pt = re.compile('(GET|POST).*(HTTP)')
ag = re.compile('(User-Agent:).*')
s = ServerProxy("http://localhost:8888")
def getURL(data):
url = data.strip('GET')
url = url.strip('POST')
url = url.strip('HTTP')
url = url.replace('%','%25')
return url.strip()
def afterRPC(msg, attack_ip, request, time):
res = int(msg[0]['res'])
url = msg[0]['url']
    if res == 0:
        print "Malicious request detected; it will be logged"
        print "Attacker: " + attack_ip
match = ag.match(request)
if match:
print "yes"
print match.group()
data = time+'|'+attack_ip+'|'+url+'|'+time+'||'
os.system('echo "' + data +'" >> log.txt')
#f = open('log.txt','a+')
#f.write(data)
#f.flush()
#f.close()
def pktTCP(pkt):
global count
count=count+1
#print count
    if HTTP.HTTPRequest in pkt or HTTP.HTTPResponse in pkt:
src=pkt[IP].src
srcport=pkt[IP].sport
dst=pkt[IP].dst
dstport=pkt[IP].dport
res_payload=str(pkt[TCP].payload)
if HTTP.HTTPRequest in pkt:
log_time = str(time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time())))
print log_time
print "HTTP Request:"
#print res_payload
match = pt.match(res_payload)
if match:
data = str(match.group())
print data
url = getURL(data)
print url
afterRPC(s.rpc_ai_waf(url), src, res_payload, log_time)
if HTTP.HTTPResponse in pkt:
#print "HTTP Response:"
#try:
# headers,body= str(test).split("\r\n\r\n", 1)
# print headers
#except Exception,e:
# print e
print "======================================================================"
else:
print pkt[IP].src,pkt[IP].sport,'->',pkt[TCP].flags
print 'other'
sniff(filter='tcp and port 80',prn=pktTCP,iface='eth0')
|
python
|
# coding=utf-8
import unittest
from rozipparser.codeparser import CodeParser
class TestSmallLocalityParsing(unittest.TestCase):
def test_number_of_codes(self):
parser = CodeParser("rozipparser/tests/inputs/small_locality_input.xlsx")
codes = parser.get_codes()
self.assertEqual(len(codes), 9)
def test_code_correctness(self):
parser = CodeParser("rozipparser/tests/inputs/small_locality_input.xlsx")
codes = parser.get_codes()
first_code = codes[0]
self.assertEqual(first_code.county, u"Ilfov")
self.assertEqual(first_code.locality, u"Buftea")
self.assertIsNone(first_code.sector)
self.assertIsNone(first_code.street)
self.assertIsNone(first_code.house_number)
self.assertEqual(first_code.zip, u"070000")
self.assertIsNone(first_code.street_type)
|
python
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.58
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VideoCategory(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, id_category=None, id_media_source=None, filename=None, cover=None, thumbnail=None, date_add=None, date_upd=None):
"""
VideoCategory - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'id_category': 'int',
'id_media_source': 'int',
'filename': 'str',
'cover': 'str',
'thumbnail': 'str',
'date_add': 'str',
'date_upd': 'str'
}
self.attribute_map = {
'id': 'id',
'id_category': 'id_category',
'id_media_source': 'id_media_source',
'filename': 'filename',
'cover': 'cover',
'thumbnail': 'thumbnail',
'date_add': 'date_add',
'date_upd': 'date_upd'
}
self._id = id
self._id_category = id_category
self._id_media_source = id_media_source
self._filename = filename
self._cover = cover
self._thumbnail = thumbnail
self._date_add = date_add
self._date_upd = date_upd
@property
def id(self):
"""
Gets the id of this VideoCategory.
:return: The id of this VideoCategory.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this VideoCategory.
:param id: The id of this VideoCategory.
:type: int
"""
self._id = id
@property
def id_category(self):
"""
Gets the id_category of this VideoCategory.
:return: The id_category of this VideoCategory.
:rtype: int
"""
return self._id_category
@id_category.setter
def id_category(self, id_category):
"""
Sets the id_category of this VideoCategory.
:param id_category: The id_category of this VideoCategory.
:type: int
"""
self._id_category = id_category
@property
def id_media_source(self):
"""
Gets the id_media_source of this VideoCategory.
:return: The id_media_source of this VideoCategory.
:rtype: int
"""
return self._id_media_source
@id_media_source.setter
def id_media_source(self, id_media_source):
"""
Sets the id_media_source of this VideoCategory.
:param id_media_source: The id_media_source of this VideoCategory.
:type: int
"""
self._id_media_source = id_media_source
@property
def filename(self):
"""
Gets the filename of this VideoCategory.
:return: The filename of this VideoCategory.
:rtype: str
"""
return self._filename
@filename.setter
def filename(self, filename):
"""
Sets the filename of this VideoCategory.
:param filename: The filename of this VideoCategory.
:type: str
"""
self._filename = filename
@property
def cover(self):
"""
Gets the cover of this VideoCategory.
:return: The cover of this VideoCategory.
:rtype: str
"""
return self._cover
@cover.setter
def cover(self, cover):
"""
Sets the cover of this VideoCategory.
:param cover: The cover of this VideoCategory.
:type: str
"""
self._cover = cover
@property
def thumbnail(self):
"""
Gets the thumbnail of this VideoCategory.
:return: The thumbnail of this VideoCategory.
:rtype: str
"""
return self._thumbnail
@thumbnail.setter
def thumbnail(self, thumbnail):
"""
Sets the thumbnail of this VideoCategory.
:param thumbnail: The thumbnail of this VideoCategory.
:type: str
"""
self._thumbnail = thumbnail
@property
def date_add(self):
"""
Gets the date_add of this VideoCategory.
:return: The date_add of this VideoCategory.
:rtype: str
"""
return self._date_add
@date_add.setter
def date_add(self, date_add):
"""
Sets the date_add of this VideoCategory.
:param date_add: The date_add of this VideoCategory.
:type: str
"""
self._date_add = date_add
@property
def date_upd(self):
"""
Gets the date_upd of this VideoCategory.
:return: The date_upd of this VideoCategory.
:rtype: str
"""
return self._date_upd
@date_upd.setter
def date_upd(self, date_upd):
"""
Sets the date_upd of this VideoCategory.
:param date_upd: The date_upd of this VideoCategory.
:type: str
"""
self._date_upd = date_upd
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
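# Minimal usage sketch (an addition, not part of the generated model):
# construct an instance, round-trip it through to_dict(), and compare two
# equal instances via the __eq__ defined above.
if __name__ == "__main__":
    video = VideoCategory(id=1, id_category=2, filename="clip.mp4")
    print(video.to_dict())
    print(video == VideoCategory(id=1, id_category=2, filename="clip.mp4"))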
|
python
|
import cv2
import numpy as np
img = cv2.imread('prof.jpeg', cv2.IMREAD_COLOR)
img = cv2.resize(img, (500, 500))
# Apply a box filter with a 3x3 kernel: it averages the neighbouring pixels,
# which blurs the image and softens edges.
# cv2.blur() is a normalized box filter, so here it is the same as cv2.boxFilter().
blur1 = cv2.blur(img, (3, 3))
# Gaussian blur weights neighbours with a Gaussian kernel
# (cv2.getGaussianKernel() shows how the kernel is built).
blur2 = cv2.GaussianBlur(img, (5, 5), 0)
median = cv2.medianBlur(img, 5)
cv2.imshow('img', img)
cv2.imshow('blur1', blur1)
cv2.imshow('blur2', blur2)
cv2.imshow('median', median)
# Keep the windows open until a key is pressed, then close them.
cv2.waitKey(0)
cv2.destroyAllWindows()
|
python
|
from django import template
from ..models import Category
register = template.Library()
@register.simple_tag
def title():
    return 'وبلاگ جنگویی'  # "Django Blog"
@register.simple_tag
def brand_name():
    return 'جنگو وب'  # "Django Web"
@register.inclusion_tag('blog/partial/category_navbar.html')
def category_navbar():
return {
'categories' : Category.objects.available()
}
@register.inclusion_tag('blog/partial/category_tags.html')
def article_category_tags(article):
return {
'categories' : article.category.available()
}
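# Usage sketch (an assumption, not part of the original module): in a Django
# template that loads this tag library (the load name below is hypothetical
# and depends on this file's name), the tags would be used roughly as:
#   {% load blog_tags %}
#   {% title %} / {% brand_name %}
#   {% category_navbar %}
#   {% article_category_tags article %}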
|
python
|
import random
def generate_email():
prefix = 'ak' + str(''.join([random.choice(list('qwertyuio')) for x in range(5)]))
return f"{prefix}@mail.ru"
def generate_name():
prefix = 'Pug_' + str(''.join([random.choice(list('123456789')) for x in range(2)]))
return prefix
def test_login(app):
admin_form = app.forms.open_admin_form()
admin_form.login_as_admin()
def test_task_7(app):
admin_form = app.forms.open_admin_form()
admin_form.login_as_admin()
admin_form.click_all_menu_buttons()
def test_task_8(app):
store_form = app.forms.open_store_form()
store_form.verify_products_has_one_sticker()
def test_task_9_verify_sort_in_all_countries_list(app):
"""Проверяет сортировку всех стран"""
admin_form = app.forms.open_admin_form()
admin_form.login_as_admin()
admin_form.open_countries_tab()
admin_form.verify_sort_for_all_country()
admin_form.verify_sort_zones_in_country()
def test_task_9_verify_sort_for_zone_list_in_country(app):
"""Проверяет сортировку зон в карточке страны"""
admin_form = app.forms.open_admin_form()
admin_form.login_as_admin()
admin_form.open_countries_tab()
admin_form.verify_sort_zones_in_country()
def test_task_9_verify_sort_zones_in_geo_zone(app):
"""Проверяет сортировку зон в карточке страны на вкладке Geo zone"""
admin_form = app.forms.open_admin_form()
admin_form.login_as_admin()
admin_form.open_geo_zones_tab()
admin_form.verify_sort_zones_in_geo_zone()
def test_task_10_verify_fields(app):
"""Проверки на главной странице и на странице товара совпадают текст названия товара, акционная и обычная цена"""
store_form = app.forms.open_store_form()
store_form.verify_product()
def test_task_10_verify_product_style_on_main_page(app):
"""Проверки стилей акционной и обычной цен продукта на главной странице"""
store_form = app.forms.open_store_form()
store_form.verify_regular_price_is_through()
store_form.verify_regular_price_color_is_gray()
store_form.verify_campaign_price_color_is_red()
store_form.verify_campaign_price_color_is_bold()
def test_task_10_verify_product_style_in_product_card(app):
"""Проверки стилей акционной и обычной цен продукта в карточке продукта"""
store_form = app.forms.open_store_form()
store_form.open_duck_card()
store_form.verify_regular_price_color_is_gray_in_card()
store_form.verify_regular_price_is_through_in_card()
store_form.verify_campaign_price_color_is_red_in_card()
store_form.verify_campaign_price_color_is_bold_in_card()
def test_task_10_verify_text_size_on_main_page(app):
"""Проверка размера цен продукта на главной странице"""
store_form = app.forms.open_store_form()
store_form.verify_size_of_prices()
def test_task_10_verify_text_size_in_product_card(app):
"""Проверка размера цен продукта в карточке продукта"""
store_form = app.forms.open_store_form()
store_form.open_duck_card()
store_form.verify_size_of_prices_in_card()
def test_task_11(app):
email = generate_email()
password = "Qwe12345"
new_user_form = app.forms.open_new_user_form()
new_user_form.create_user(email, password)
app.forms.store_form.logout()
app.forms.store_form.login(email, password)
def test_task_12(app):
product_name = generate_name()
admin_form = app.forms.open_admin_form()
admin_form.login_as_admin()
new_product_form = app.forms.open_new_product_form()
new_product_form.fill_form(product_name)
admin_form.verify_product_presents(product_name)
def test_task_13(app):
store_form = app.forms.open_store_form()
product_cart = store_form.open_first_product_card()
product_cart.add_product_to_cart()
store_form = app.forms.open_store_form()
product_cart = store_form.open_first_product_card()
product_cart.add_product_to_cart()
store_form = app.forms.open_store_form()
product_cart = store_form.open_first_product_card()
product_cart.add_product_to_cart()
cart_form = app.forms.open_cart()
cart_form.remove_all_products()
def test_task_14(app):
admin_form = app.forms.open_admin_form()
admin_form.login_as_admin()
admin_form.open_countries_tab()
admin_form.open_new_country_form()
admin_form.open_all_external_links()
def test_task_17(app):
admin_form = app.forms.open_admin_form()
admin_form.login_as_admin()
admin_form.open_catalog_tab()
admin_form.get_browser_log()
|
python
|
# coding: utf-8
################################################################################
# CS 224W (Fall 2017) - HW1
# Code for Problem 1
# Author: [email protected]
# Last Updated: Oct 8, 2017
################################################################################
import snap
import numpy as np
import matplotlib.pyplot as plt
# Setup
erdosRenyi = None
smallWorld = None
collabNet = None
SUCCESS = -1
FAILURE = -2
Rnd = snap.TRnd(42)
Rnd.Randomize()
# Problem 1.1
def genErdosRenyi(N=5242, E=14484):
"""
:param - N: number of nodes
:param - E: number of edges
return type: snap.PUNGraph
return: Erdos-Renyi graph with N nodes and E edges
"""
############################################################################
# TODO: Your code here!
assert N >= 0 and E >= 0
Graph = snap.TUNGraph.New(N, E)
# Verify the requested number of edges is reasonable, otherwise we
# could run into an infinite loop.
assert(N*(N-1)/2 >= E)
nodes = [Graph.AddNode(node) for node in xrange(N)]
edges = 0
while edges < E:
srcID = Graph.GetRndNId(Rnd)
dstID = Graph.GetRndNId(Rnd)
if srcID != dstID and Graph.AddEdge(srcID, dstID) == SUCCESS:
edges += 1
############################################################################
return Graph
def genCircle(N=5242):
"""
:param - N: number of nodes
return type: snap.PUNGraph
return: Circle graph with N nodes and N edges. Imagine the nodes form a
circle and each node is connected to its two direct neighbors.
"""
############################################################################
# TODO: Your code here!
# Given the above definition, we assume that:
# N = 1 -> we have a single node with a self-loop
# N = 2 -> We have two nodes with a single edge between them
# (rather than 2).
assert N >= 0
Graph = snap.TUNGraph.New(N, N)
for node in xrange(N):
Graph.AddNode(node)
for node in xrange(N):
Graph.AddEdge(node, (node + 1) % N)
assert Graph.GetEdges() == N
############################################################################
return Graph
def connectNbrOfNbr(Graph, N=5242):
"""
:param - Graph: snap.PUNGraph object representing a circle graph on N nodes
:param - N: number of nodes
return type: snap.PUNGraph
return: Graph object with additional N edges added by connecting each node
to the neighbors of its neighbors
"""
############################################################################
# TODO: Your code here!
# Modifications to the graph happen in place, so collect a list of
# new edges to add. We do not assume any particular ordering of the
# nodes.
newEdges = []
for node in Graph.Nodes():
src = node.GetId()
# Each node in a circle graph should have 2 neighbors.
neighbors = [Graph.GetNI(node.GetNbrNId(i))
for i in xrange(node.GetDeg())]
assert len(neighbors) == 2
for neighbor in neighbors:
assert neighbor.GetDeg() == 2
candidateDest = [neighbor.GetNbrNId(i)
for i in xrange(neighbor.GetDeg())
if neighbor.GetNbrNId(i) != src]
# Only one more edge should be added.
assert len(candidateDest) == 1
dst = candidateDest[0]
newEdges.append((min(src, dst), max(src, dst)))
# Clear duplicates.
newEdges = list(set(newEdges))
assert len(newEdges) == N
for (srcID, dstID) in newEdges:
Graph.AddEdge(srcID, dstID)
assert Graph.GetNodes() == N
assert Graph.GetEdges() == 2*N
############################################################################
return Graph
def connectRandomNodes(Graph, M=4000):
"""
:param - Graph: snap.PUNGraph object representing an undirected graph
:param - M: number of edges to be added
return type: snap.PUNGraph
return: Graph object with additional M edges added by connecting M randomly
selected pairs of nodes not already connected.
"""
############################################################################
# TODO: Your code here!
E = Graph.GetEdges()
N = Graph.GetNodes()
assert (N * (N - 1) / 2 - E) >= M
edges = 0
while edges < M:
srcID = Graph.GetRndNId(Rnd)
dstID = Graph.GetRndNId(Rnd)
if srcID != dstID and Graph.AddEdge(srcID, dstID) == SUCCESS:
edges += 1
assert Graph.GetNodes() == N
assert Graph.GetEdges() == (E + M)
############################################################################
return Graph
def genSmallWorld(N=5242, E=14484):
"""
:param - N: number of nodes
:param - E: number of edges
return type: snap.PUNGraph
return: Small-World graph with N nodes and E edges
"""
Graph = genCircle(N)
Graph = connectNbrOfNbr(Graph, N)
Graph = connectRandomNodes(Graph, 4000)
return Graph
def loadCollabNet(path):
"""
:param - path: path to edge list file
return type: snap.PUNGraph
return: Graph loaded from edge list at `path` and self edges removed
Do not forget to remove the self edges!
"""
############################################################################
# TODO: Your code here!
Graph = snap.LoadEdgeList(snap.PUNGraph, path, 0, 1, "\t")
for node in Graph.Nodes():
Graph.DelEdge(node.GetId(), node.GetId())
assert Graph.GetNodes() == 5242
assert Graph.GetEdges() == 14484
############################################################################
return Graph
def getDataPointsToPlot(Graph):
"""
:param - Graph: snap.PUNGraph object representing an undirected graph
return values:
X: list of degrees
Y: list of frequencies: Y[i] = fraction of nodes with degree X[i]
"""
############################################################################
# TODO: Your code here!
degreeDistribution = snap.TIntPrV()
snap.GetDegCnt(Graph, degreeDistribution)
N = float(Graph.GetNodes())
X, Y = [], []
for item in degreeDistribution:
X.append(item.GetVal1())
Y.append(float(item.GetVal2()) / N)
############################################################################
return X, Y
def Q1_1():
"""
Code for HW1 Q1.1
"""
global erdosRenyi, smallWorld, collabNet
erdosRenyi = genErdosRenyi(5242, 14484)
smallWorld = genSmallWorld(5242, 14484)
collabNet = loadCollabNet("data/ca-GrQc.txt")
plt.close()
x_erdosRenyi, y_erdosRenyi = getDataPointsToPlot(erdosRenyi)
plt.loglog(x_erdosRenyi, y_erdosRenyi, color='y', label='Erdos Renyi Network')
x_smallWorld, y_smallWorld = getDataPointsToPlot(smallWorld)
plt.loglog(x_smallWorld, y_smallWorld, linestyle='dashed',
color='r', label='Small World Network')
x_collabNet, y_collabNet = getDataPointsToPlot(collabNet)
plt.loglog(x_collabNet, y_collabNet, linestyle='dotted',
color='b', label='Collaboration Network')
plt.xlabel('Node Degree (log)')
plt.ylabel('Proportion of Nodes with a Given Degree (log)')
plt.title(
'Degree Distribution of Erdos Renyi, Small World, ' +
'and Collaboration Networks')
plt.legend()
plt.savefig("output/erdo_small_collab_log_logdegree_distribution", dpi=600)
plt.show()
# Execute code for Q1.1
Q1_1()
# Problem 1.2
# Find max degree of all 3 graphs for plotting (add 2 for padding)
maxdeg = max([erdosRenyi.GetNI((snap.GetMxDegNId(erdosRenyi))).GetDeg(),
smallWorld.GetNI((snap.GetMxDegNId(smallWorld))).GetDeg(),
collabNet.GetNI((snap.GetMxDegNId(collabNet))).GetDeg()]) + 2
# Erdos Renyi
def calcQk(Graph, maxDeg=maxdeg):
"""
:param Graph - snap.PUNGraph object representing an undirected graph
:param maxDeg - maximum degree(+1) for which q_k needs to be calculated
return type: np.array
return: array q_k of dimension maxDeg representing the excess degree
distribution
"""
############################################################################
# TODO: Your code here!
degreeDistribution = snap.TIntPrV()
snap.GetDegCnt(Graph, degreeDistribution)
q_k_p = np.zeros(maxDeg)
for item in degreeDistribution:
degree = item.GetVal1()
if (degree > 0):
q_k_p[degree - 1] = degree * item.GetVal2()
q_k = q_k_p / np.sum(q_k_p)
############################################################################
return q_k
def calcExpectedDegree(Graph):
"""
:param Graph - snap.PUNGraph object representing an undirected graph
return type: float
return: expected degree of Graph
"""
############################################################################
# TODO: Your code here!
ed = 0.0
degreeDistribution = snap.TIntPrV()
snap.GetDegCnt(Graph, degreeDistribution)
N = float(Graph.GetNodes())
for item in degreeDistribution:
ed += float(item.GetVal1()) * float(item.GetVal2()) / N
############################################################################
return ed
def calcExpectedExcessDegree(Graph, qk):
"""
:param Graph - snap.PUNGraph object representing an undirected graph
:param qk - np.array of dimension maxdeg representing excess degree
distribution of `Graph
return type: float
return: expected excess degree of `Graph
"""
############################################################################
# TODO: Your code here!
eed = 0.0
eed = np.average([i for i in xrange(len(qk))], weights=qk)
############################################################################
return eed
def Q1_2_a():
"""
Code for Q1.2a
"""
qk_erdosRenyi = calcQk(erdosRenyi, maxdeg)
qk_smallWorld = calcQk(smallWorld, maxdeg)
qk_collabNet = calcQk(collabNet, maxdeg)
plt.close()
plt.loglog(range(maxdeg), qk_erdosRenyi,
color='y', label='Erdos Renyi Network')
plt.loglog(range(maxdeg), qk_smallWorld, linestyle='dashed',
color='r', label='Small World Network')
plt.loglog(range(maxdeg), qk_collabNet, linestyle='dotted',
color='b', label='Collaboration Network')
plt.xlabel('k Degree')
plt.ylabel('Excess Degree Distribution')
plt.title(
'Excess Degree Distribution of Erdos Renyi, Small World, ' +
'and Collaboration Networks')
plt.legend()
plt.savefig(
"output/erdo_small_collab_log_log_excess_degree_distribution", dpi=600)
plt.show()
# Calculate Expected Degree
ed_erdosRenyi = calcExpectedDegree(erdosRenyi)
ed_smallWorld = calcExpectedDegree(smallWorld)
ed_collabNet = calcExpectedDegree(collabNet)
print 'Expected Degree for Erdos Renyi: %f' % ed_erdosRenyi
print 'Expected Degree for Small World: %f' % ed_smallWorld
print 'Expected Degree for Collaboration Network: %f' % ed_collabNet
# Calculate Expected Excess Degree
eed_erdosRenyi = calcExpectedExcessDegree(erdosRenyi, qk_erdosRenyi)
eed_smallWorld = calcExpectedExcessDegree(smallWorld, qk_smallWorld)
eed_collabNet = calcExpectedExcessDegree(collabNet, qk_collabNet)
print 'Expected Excess Degree for Erdos Renyi: %f' % (eed_erdosRenyi)
print 'Expected Excess Degree for Small World: %f' % (eed_smallWorld)
print 'Expected Excess Degree for Collaboration Network: %f' % (eed_collabNet)
# Execute code for Q1.2a
Q1_2_a()
# Problem 1.3 - Clustering Coefficient
def calcClusteringCoefficient(Graph):
"""
:param - Graph: snap.PUNGraph object representing an undirected graph
return type: float
returns: clustering coeffient of `Graph
"""
############################################################################
# TODO: Your code here!
C = 0.0
total = 0.0
for node in Graph.Nodes():
ki = node.GetDeg()
numConnectedNeighbors = 0.0
if ki >= 2:
neighborIDs = [node.GetNbrNId(i)
for i in xrange(node.GetDeg())]
for neighborID in neighborIDs:
neighbor = Graph.GetNI(neighborID)
candidates = [neighbor.GetNbrNId(i)
for i in xrange(neighbor.GetDeg())]
for candidate in candidates:
if node.IsNbrNId(candidate):
numConnectedNeighbors += 1
total += numConnectedNeighbors / (ki * (ki - 1))
C = total / float(Graph.GetNodes())
# Sanity check.
assert abs(C - snap.GetClustCf(Graph)) < 1e-8
############################################################################
return C
def Q1_3():
"""
Code for Q1.3
"""
C_erdosRenyi = calcClusteringCoefficient(erdosRenyi)
C_smallWorld = calcClusteringCoefficient(smallWorld)
C_collabNet = calcClusteringCoefficient(collabNet)
print('Clustering Coefficient for Erdos Renyi Network: %f' % C_erdosRenyi)
print('Clustering Coefficient for Small World Network: %f' % C_smallWorld)
print('Clustering Coefficient for Collaboration Network: %f' % C_collabNet)
# Execute code for Q1.3
Q1_3()
|
python
|
import os
def cpr(parentFile, pasteDir, names):
    # Read the source file once, then copy its bytes out under each requested name.
    with open(parentFile, "rb") as source:
        parentFileContent = source.read()
extension = os.path.splitext(parentFile)[1]
for name in names:
name = str(name)
currentDir = os.path.join(pasteDir, name)+extension
currentFile = open(currentDir, "wb")
currentFile.write(parentFileContent)
currentFile.close()
def xor(a, b):
    # Note: despite its name this is a bitwise OR; Map.__add__ below relies on
    # it to merge two 0/1 patterns cell by cell.
    return a | b
def tupleRange(a, b):
    # Build an inclusive list of (x, y) pairs stepping from a to b, repeating
    # the last value of the shorter axis so both coordinates reach b.
xList = []
yList = []
for x in zip(range(a[0], b[0]+1)):
xList.append(x[0])
for y in zip(range(a[1], b[1]+1)):
yList.append(y[0])
tuples = []
lastX = 0
lastY = 0
endAt = max(len(xList), len(yList))
index = 0
while True:
try:
lastX = xList[index]
        except IndexError:
pass
try:
lastY = yList[index]
        except IndexError:
pass
tuples.append((lastX, lastY))
if index >= endAt:
break
index += 1
return tuples
def genProps(sides, name, p_id, path, tiles, method, metadata):
try:
f = open(os.path.join(path, name)+".properties", "x")
    except FileExistsError:
f = open(os.path.join(path, name)+".properties", "w")
f.write(f"method={method}\n")
f.write(f"tiles=0-{str(tiles)}\n")
if sides != "":
f.write(f"sides={sides}\n")
else:
pass
f.write(f"matchBlocks={p_id}\n")
    if sides != "":
f.write(f"metadata={metadata}")
else:
pass
f.close()
class Side():
def __init__(self, l, r, u, d):
self.x1 = l
self.y1 = r
self.x2 = u
self.y2 = d
class Map():
def __init__(self, pattern = [
[1,1,1],
[1,0,1],
[1,1,1]
]):
self.pat = pattern
def __add__(self, other):
newPattern = [[0,0,0], [0,0,0], [0,0,0]]
for x1, x2, indX in zip(self.pat, other.pat, range(len(other.pat))):
for y1, y2, indY in zip(x1, x2, range(len(x2))):
newPattern[indX][indY] = xor(y1, y2)
return Map(newPattern)
def __repr__(self):
string = ""
for x1 in self.pat:
string += "\n"
for y1 in x1:
string += f"{y1}"
return string
@property
def T(self):
return self.pat[0][1]
@property
def B(self):
return self.pat[2][1]
@property
def R(self):
return self.pat[1][2]
@property
def L(self):
return self.pat[1][0]
@property
def TR(self):
return self.pat[0][2]
@property
def BR(self):
return self.pat[2][2]
@property
def TL(self):
return self.pat[0][0]
@property
def BL(self):
return self.pat[2][0]
class Top(Map):
def __init__(self):
super().__init__([
[1,1,1],
[0,0,0],
[0,0,0]
])
class Down(Map):
def __init__(self):
super().__init__([
[0,0,0],
[0,0,0],
[1,1,1]
])
class Left(Map):
def __init__(self):
super().__init__([
[1,0,0],
[1,0,0],
[1,0,0]
])
class Right(Map):
def __init__(self):
super().__init__([
[0,0,1],
[0,0,1],
[0,0,1]
])
class TopLeft(Map):
def __init__(self):
super().__init__([
[1,0,0],
[0,0,0],
[0,0,0]
])
class DownLeft(Map):
def __init__(self):
super().__init__([
[0,0,0],
[0,0,0],
[1,0,0]
])
class TopRight(Map):
def __init__(self):
super().__init__([
[0,0,1],
[0,0,0],
[0,0,0]
])
class DownRight(Map):
def __init__(self):
super().__init__([
[0,0,0],
[0,0,0],
[0,0,1]
])
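# Minimal demonstration sketch (an addition, not part of the original module):
# adding two Map patterns merges them cell by cell with the bitwise OR defined
# in xor() above, so Top() + Left() marks the whole top row and left column.
if __name__ == "__main__":
    print(Top() + Left())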
|
python
|
import time
import bisect
import dgl
import torch
import numpy as np
from tqdm import tqdm
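# Overview (comments added for clarity): each count_motif_* helper below looks
# at one edge `eid` = (src, dst) of a DGL graph carrying per-edge 'timestamp'
# data. Most helpers only proceed when the reciprocal edge (dst, src) exists;
# they then build boolean masks over the timestamps of src's incident edges to
# select events falling inside windows of width `threshold_time` around the
# pivot timestamp of `eid`, and accumulate the number of matching temporal
# orderings into one slot of g.edata['motif_count'][eid].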
# Count motif 1
def count_motif_1_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_2, right_margin_2 = pivot, pivot + threshold_time
mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp <= right_margin_2)
mask_2 = torch.logical_and(mask_2, src_out_ngr == dst)
src_out_ngr_2, src_out_timestamp_2 = src_out_ngr[mask_2], src_out_timestamp[mask_2]
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
for i, node_i in enumerate(src_out_ngr_2):
left_margin_3, right_margin_3 = src_out_timestamp_2[i], pivot + threshold_time
mask_3 = torch.logical_and(src_in_timestamp > left_margin_3, src_in_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_in_ngr == dst)
g.edata['motif_count'][eid][0] += torch.sum(mask_3)
def count_motif_1_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr == dst)
src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
for i, node_i in enumerate(src_out_ngr_1):
left_margin_3, right_margin_3 = pivot, src_out_timestamp_1[i] + threshold_time
mask_3 = torch.logical_and(src_in_timestamp > left_margin_3, src_in_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_in_ngr == dst)
g.edata['motif_count'][eid][1] += torch.sum(mask_3)
def count_motif_1_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_in_timestamp >= left_margin_1, src_in_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_in_ngr == dst)
src_in_ngr_1, src_in_timestamp_1 = src_in_ngr[mask_1], src_in_timestamp[mask_1]
for i, node_i in enumerate(src_in_ngr_1):
left_margin_2, right_margin_2 = src_in_timestamp_1[i], pivot
mask_2 = torch.logical_and(src_in_timestamp > left_margin_2, src_in_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_in_ngr == dst)
g.edata['motif_count'][eid][2] += torch.sum(mask_2)
# Count motif 2
def count_motif_2_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_3, right_margin_3 = pivot, pivot + threshold_time
mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_out_ngr == dst)
src_out_ngr_3, src_out_timestamp_3 = src_out_ngr[mask_3], src_out_timestamp[mask_3]
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
for i, node_i in enumerate(src_out_ngr_3):
left_margin_2, right_margin_2 = pivot, src_out_timestamp_3[i]
mask_2 = torch.logical_and(src_in_timestamp > left_margin_2, src_in_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_in_ngr == dst)
g.edata['motif_count'][eid][3] += torch.sum(mask_2)
def count_motif_2_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid].item()
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_in_timestamp >= left_margin_1, src_in_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_in_ngr == dst)
src_in_ngr_1, src_in_timestamp_1 = src_in_ngr[mask_1], src_in_timestamp[mask_1]
for i, node_i in enumerate(src_in_ngr_1):
left_margin_3, right_margin_3 = pivot, src_in_timestamp_1[i] + threshold_time
mask_3 = torch.logical_and(src_in_timestamp > left_margin_3, src_in_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_in_ngr == dst)
g.edata['motif_count'][eid][4] += torch.sum(mask_3)
def count_motif_2_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid].item()
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr == dst)
src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
for i, node_i in enumerate(src_out_ngr_1):
left_margin_2, right_margin_2 = src_out_timestamp_1[i], pivot
mask_2 = torch.logical_and(src_in_timestamp > left_margin_2, src_in_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_in_ngr == dst)
g.edata['motif_count'][eid][5] += torch.sum(mask_2)
# Count motif 3
def count_motif_3_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_2, right_margin_2 = pivot, pivot + threshold_time
mask_2 = torch.logical_and(src_in_timestamp > left_margin_2, src_in_timestamp <= right_margin_2)
mask_2 = torch.logical_and(mask_2, src_in_ngr == dst)
src_in_ngr_2, src_in_timestamp_2 = src_in_ngr[mask_2], src_in_timestamp[mask_2]
for i, node_i in enumerate(src_in_ngr_2):
left_margin_3, right_margin_3 = src_in_timestamp_2[i], pivot + threshold_time
mask_3 = torch.logical_and(src_in_timestamp > left_margin_3, src_in_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_in_ngr == dst)
g.edata['motif_count'][eid][6] += torch.sum(mask_3)
def count_motif_3_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_in_timestamp >= left_margin_1, src_in_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_in_ngr == dst)
src_in_ngr_1, src_in_timestamp_1 = src_in_ngr[mask_1], src_in_timestamp[mask_1]
for i, node_i in enumerate(src_in_ngr_1):
left_margin_3, right_margin_3 = pivot, src_in_timestamp_1[i] + threshold_time
mask_3 = torch.logical_and(src_out_timestamp > pivot, src_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_out_ngr == dst)
g.edata['motif_count'][eid][7] += torch.sum(mask_3)
def count_motif_3_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_in_timestamp >= left_margin_1, src_in_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_in_ngr == dst)
src_in_ngr_1, src_in_timestamp_1 = src_in_ngr[mask_1], src_in_timestamp[mask_1]
for i, node_i in enumerate(src_in_ngr_1):
left_margin_2, right_margin_2 = src_in_timestamp_1[i], pivot
mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_out_ngr == dst)
g.edata['motif_count'][eid][8] += torch.sum(mask_2)
# Count motif 4
def count_motif_4_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_2, right_margin_2 = pivot, pivot + threshold_time
mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp <= right_margin_2)
mask_2 = torch.logical_and(mask_2, src_out_ngr == dst)
src_out_ngr_2, src_out_timestamp_2 = src_out_ngr[mask_2], src_out_timestamp[mask_2]
for i, node_i in enumerate(src_out_ngr_2):
left_margin_3, right_margin_3 = src_out_timestamp_2[i], pivot + threshold_time
mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_out_ngr != node_i)
g.edata['motif_count'][eid][9] += torch.sum(mask_3)
def count_motif_4_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr == dst)
src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
for i, node_i in enumerate(src_out_ngr_1):
left_margin_3, right_margin_3 = pivot, src_out_timestamp_1[i] + threshold_time
mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_out_ngr != node_i)
g.edata['motif_count'][eid][10] += torch.sum(mask_3)
def count_motif_4_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
for i, node_i in enumerate(src_out_ngr_1):
left_margin_2, right_margin_2 = src_out_timestamp_1[i], pivot
mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_out_ngr == node_i)
g.edata['motif_count'][eid][11] += torch.sum(mask_2)
# Count motif 5
def count_motif_5_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_3, right_margin_3 = pivot, pivot + threshold_time
mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_out_ngr == dst)
src_out_ngr_3, src_out_timestamp_3 = src_out_ngr[mask_3], src_out_timestamp[mask_3]
for i, node_i in enumerate(src_out_ngr_3):
left_margin_2, right_margin_2 = pivot, src_out_timestamp_3[i]
mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_out_ngr != node_i)
g.edata['motif_count'][eid][12] += torch.sum(mask_2)
def count_motif_5_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
for i, node_i in enumerate(src_out_ngr_1):
left_margin_3, right_margin_3 = pivot, src_out_timestamp_1[i] + threshold_time
mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_out_ngr == node_i)
g.edata['motif_count'][eid][13] += torch.sum(mask_3)
def count_motif_5_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr == dst)
src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
for i, node_i in enumerate(src_out_ngr_1):
left_margin_2, right_margin_2 = src_out_timestamp_1[i], pivot
mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_out_ngr != node_i)
g.edata['motif_count'][eid][14] += torch.sum(mask_2)
# Count motif 6
def count_motif_6_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_2, right_margin_2 = pivot, pivot + threshold_time
mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp <= right_margin_2)
mask_2 = torch.logical_and(mask_2, src_out_ngr != dst)
src_out_ngr_2, src_out_timestamp_2 = src_out_ngr[mask_2], src_out_timestamp[mask_2]
for i, node_i in enumerate(src_out_ngr_2):
left_margin_3, right_margin_3 = src_out_timestamp_2[i], pivot + threshold_time
mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_out_ngr == node_i)
g.edata['motif_count'][eid][15] += torch.sum(mask_3)
def count_motif_6_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
for i, node_i in enumerate(src_out_ngr_1):
left_margin_3, right_margin_3 = pivot, src_out_timestamp_1[i] + threshold_time
mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_out_ngr == dst)
g.edata['motif_count'][eid][16] += torch.sum(mask_3)
def count_motif_6_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
for i, node_i in enumerate(src_out_ngr_1):
left_margin_2, right_margin_2 = src_out_timestamp_1[i], pivot
mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_out_ngr == dst)
src_out_ngr_2 = src_out_ngr[mask_2]
g.edata['motif_count'][eid][17] += torch.sum(mask_2)
# Count motif 7
def count_motif_7_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_2, right_margin_2 = pivot, pivot + threshold_time
mask_2 = torch.logical_and(src_in_timestamp > left_margin_2, src_in_timestamp <= right_margin_2)
mask_2 = torch.logical_and(mask_2, src_in_ngr == dst)
src_in_ngr_2, src_in_timestamp_2 = src_in_ngr[mask_2], src_in_timestamp[mask_2]
for i, node_i in enumerate(src_in_ngr_2):
left_margin_3, right_margin_3 = src_in_timestamp_2[i], pivot + threshold_time
mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_out_ngr != dst)
g.edata['motif_count'][eid][18] += torch.sum(mask_3)
def count_motif_7_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_in_timestamp >= left_margin_1, src_in_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_in_ngr == dst)
src_in_ngr_1, src_in_timestamp_1 = src_in_ngr[mask_1], src_in_timestamp[mask_1]
dst_out_timestamp = g.edata['timestamp'][g.out_edges(dst, form='eid')]
dst_out_ngr = g.out_edges(dst)[1]
for i, node_i in enumerate(src_in_ngr_1):
left_margin_3, right_margin_3 = pivot, src_in_timestamp_1[i] + threshold_time
mask_3 = torch.logical_and(dst_out_timestamp > left_margin_3, dst_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, dst_out_ngr != src)
g.edata['motif_count'][eid][19] += torch.sum(mask_3)
def count_motif_7_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
for i, node_i in enumerate(src_out_ngr_1):
left_margin_2, right_margin_2 = src_out_timestamp_1[i], pivot
mask_2 = torch.logical_and(src_in_timestamp > left_margin_2, src_in_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_in_ngr == node_i)
g.edata['motif_count'][eid][20] += torch.sum(mask_2)
# Count motif 8
def count_motif_8_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_3, right_margin_3 = pivot, pivot + threshold_time
mask_3 = torch.logical_and(src_in_timestamp > left_margin_3, src_in_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_in_ngr == dst)
src_in_ngr_3, src_in_timestamp_3 = src_in_ngr[mask_3], src_in_timestamp[mask_3]
for i, node_i in enumerate(src_in_ngr_3):
        left_margin_2, right_margin_2 = pivot, src_in_timestamp_3[i]
mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_out_ngr != dst)
g.edata['motif_count'][eid][21] += torch.sum(mask_2)
def count_motif_8_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_3, right_margin_3 = pivot, pivot + threshold_time
mask_3 = torch.logical_and(src_in_timestamp > left_margin_3, src_in_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_in_ngr != dst)
src_in_ngr_3, src_in_timestamp_3 = src_in_ngr[mask_3], src_in_timestamp[mask_3]
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
for i, node_i in enumerate(src_in_ngr_3):
if not g.has_edges_between(src, node_i):
continue
left_margin_1, right_margin_1 = src_in_timestamp_3[i] - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr == node_i)
g.edata['motif_count'][eid][22] += torch.sum(mask_1)
def count_motif_8_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
dst_out_timestamp = g.edata['timestamp'][g.out_edges(dst, form='eid')]
dst_out_ngr = g.out_edges(dst)[1]
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_in_timestamp >= left_margin_1, src_in_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_in_ngr == dst)
src_in_ngr_1, src_in_timestamp_1 = src_in_ngr[mask_1], src_in_timestamp[mask_1]
for i, node_i in enumerate(src_in_ngr_1):
left_margin_2, right_margin_2 = src_in_timestamp_1[i], pivot
mask_2 = torch.logical_and(dst_out_timestamp > left_margin_2, dst_out_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, dst_out_ngr != src)
g.edata['motif_count'][eid][23] += torch.sum(mask_2)
# Count motif 9
def count_motif_9_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_3, right_margin_3 = pivot, pivot + threshold_time
mask_3 = torch.logical_and(src_in_timestamp > left_margin_3, src_in_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_in_ngr != dst)
src_in_ngr_3, src_in_timestamp_3 = src_in_ngr[mask_3], src_in_timestamp[mask_3]
for i, node_i in enumerate(src_in_ngr_3):
if not g.has_edges_between(src, node_i):
continue
        left_margin_2, right_margin_2 = pivot, src_in_timestamp_3[i]
mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_out_ngr == node_i)
g.edata['motif_count'][eid][24] += torch.sum(mask_2)
def count_motif_9_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_3, right_margin_3 = pivot, pivot + threshold_time
mask_3 = torch.logical_and(src_in_timestamp > left_margin_3, src_in_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_in_ngr == dst)
src_in_ngr_3, src_in_timestamp_3 = src_in_ngr[mask_3], src_in_timestamp[mask_3]
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
for i, node_i in enumerate(src_in_ngr_3):
left_margin_1, right_margin_1 = src_in_timestamp_3[i] - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
g.edata['motif_count'][eid][25] += torch.sum(mask_1)
def count_motif_9_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
if not g.has_edges_between(dst, src):
return
dst_out_timestamp = g.edata['timestamp'][g.out_edges(dst, form='eid')]
dst_out_ngr = g.out_edges(dst)[1]
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
pivot = g.edata['timestamp'][eid]
left_margin_2, right_margin_2 = pivot - threshold_time, pivot
mask_2 = torch.logical_and(src_in_timestamp >= left_margin_2, src_in_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_in_ngr == dst)
src_in_ngr_2, src_in_timestamp_2 = src_in_ngr[mask_2], src_in_timestamp[mask_2]
for i, node_i in enumerate(src_in_ngr_2):
left_margin_1, right_margin_1 = pivot - threshold_time, src_in_timestamp_2[i]
mask_1 = torch.logical_and(dst_out_timestamp >= left_margin_1, dst_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, dst_out_ngr != src)
g.edata['motif_count'][eid][26] += torch.sum(mask_1)
# Count motif 10
def count_motif_10_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
dst_out_timestamp = g.edata['timestamp'][g.out_edges(dst, form='eid')]
dst_out_ngr = g.out_edges(dst)[1]
if len(src_in_ngr) == 0 or len(dst_out_ngr) == 0:
return
pivot = g.edata['timestamp'][eid]
left_margin_2, right_margin_2 = pivot, pivot + threshold_time
mask_2 = torch.logical_and(dst_out_timestamp > left_margin_2, dst_out_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, dst_out_ngr != src)
dst_out_ngr_2, dst_out_timestamp_2 = dst_out_ngr[mask_2], dst_out_timestamp[mask_2]
for i, node_i in enumerate(dst_out_ngr_2):
left_margin_3, right_margin_3 = dst_out_timestamp_2[i], pivot + threshold_time
mask_3 = torch.logical_and(src_in_timestamp > left_margin_3, src_in_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_in_ngr == node_i)
g.edata['motif_count'][eid][27] += torch.sum(mask_3)
def count_motif_10_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
dst_out_timestamp = g.edata['timestamp'][g.out_edges(dst, form='eid')]
dst_out_ngr = g.out_edges(dst)[1]
if len(src_in_ngr) == 0 or len(dst_out_ngr) == 0:
return
pivot = g.edata['timestamp'][eid]
left_margin_3, right_margin_3 = pivot, pivot + threshold_time
mask_3 = torch.logical_and(dst_out_timestamp > left_margin_3, dst_out_timestamp < right_margin_3)
mask_3 = torch.logical_and(mask_3, dst_out_ngr != src)
dst_out_ngr_3, dst_out_timestamp_3 = dst_out_ngr[mask_3], dst_out_timestamp[mask_3]
for i, node_i in enumerate(dst_out_ngr_3):
left_margin_1, right_margin_1 = dst_out_timestamp_3[i] - threshold_time, pivot
mask_1 = torch.logical_and(src_in_timestamp >= left_margin_1, src_in_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_in_ngr == node_i)
g.edata['motif_count'][eid][28] += torch.sum(mask_1)
def count_motif_10_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
dst_out_timestamp = g.edata['timestamp'][g.out_edges(dst, form='eid')]
dst_out_ngr = g.out_edges(dst)[1]
if len(src_in_ngr) == 0 or len(dst_out_ngr) == 0:
return
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(dst_out_timestamp >= left_margin_1, dst_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, dst_out_ngr != src)
dst_out_ngr_1, dst_out_timestamp_1 = dst_out_ngr[mask_1], dst_out_timestamp[mask_1]
for i, node_i in enumerate(dst_out_ngr_1):
left_margin_2, right_margin_2 = dst_out_timestamp_1[i], pivot
mask_2 = torch.logical_and(src_in_timestamp > left_margin_2, src_in_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, src_in_ngr == node_i)
g.edata['motif_count'][eid][29] += torch.sum(mask_2)
# Count motif 11
def count_motif_11_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
dst_out_timestamp = g.edata['timestamp'][g.out_edges(dst, form='eid')]
dst_out_ngr = g.out_edges(dst)[1]
if len(src_out_ngr) == 0 or len(dst_out_ngr) == 0:
return
pivot = g.edata['timestamp'][eid]
left_margin_2, right_margin_2 = pivot, pivot + threshold_time
mask_2 = torch.logical_and(src_out_timestamp > left_margin_2, src_out_timestamp <= right_margin_2)
mask_2 = torch.logical_and(mask_2, src_out_ngr != dst)
src_out_ngr_2, src_out_timestamp_2 = src_out_ngr[mask_2], src_out_timestamp[mask_2]
for i, node_i in enumerate(src_out_ngr_2):
left_margin_3, right_margin_3 = src_out_timestamp_2[i], pivot + threshold_time
mask_3 = torch.logical_and(dst_out_timestamp > left_margin_3, dst_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, dst_out_ngr == node_i)
g.edata['motif_count'][eid][30] += torch.sum(mask_3)
def count_motif_11_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
dst_in_timestamp = g.edata['timestamp'][g.in_edges(dst, form='eid')]
dst_in_ngr = g.in_edges(dst)[0]
if len(src_out_ngr) == 0 or len(dst_in_ngr) == 0:
return
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
for i, node_i in enumerate(src_out_ngr_1):
left_margin_3, right_margin_3 = pivot, src_out_timestamp_1[i] + threshold_time
mask_3 = torch.logical_and(dst_in_timestamp > left_margin_3, dst_in_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, dst_in_ngr == node_i)
g.edata['motif_count'][eid][31] += torch.sum(mask_3)
def count_motif_11_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
dst_in_timestamp = g.edata['timestamp'][g.in_edges(dst, form='eid')]
dst_in_ngr = g.in_edges(dst)[0]
if len(src_in_ngr) == 0 or len(dst_in_ngr) == 0:
return
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_in_timestamp >= left_margin_1, src_in_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_in_ngr != dst)
src_in_ngr_1, src_in_timestamp_1 = src_in_ngr[mask_1], src_in_timestamp[mask_1]
for i, node_i in enumerate(src_in_ngr_1):
left_margin_2, right_margin_2 = src_in_timestamp_1[i], pivot
mask_2 = torch.logical_and(dst_in_timestamp > left_margin_2, dst_in_timestamp <= right_margin_2)
mask_2 = torch.logical_and(mask_2, dst_in_ngr == node_i)
g.edata['motif_count'][eid][32] += torch.sum(mask_2)
# Count motif 12
def count_motif_12_1(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
dst_out_timestamp = g.edata['timestamp'][g.out_edges(dst, form='eid')]
dst_out_ngr = g.out_edges(dst)[1]
if len(src_out_ngr) == 0 or len(dst_out_ngr) == 0:
return
pivot = g.edata['timestamp'][eid]
left_margin_3, right_margin_3 = pivot, pivot + threshold_time
mask_3 = torch.logical_and(src_out_timestamp > left_margin_3, src_out_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, src_out_ngr != dst)
src_out_ngr_3, src_out_timestamp_3 = src_out_ngr[mask_3], src_out_timestamp[mask_3]
for i, node_i in enumerate(src_out_ngr_3):
left_margin_2, right_margin_2 = pivot, src_out_timestamp_3[i]
mask_2 = torch.logical_and(dst_out_timestamp > left_margin_2, dst_out_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, dst_out_ngr == node_i)
g.edata['motif_count'][eid][33] += torch.sum(mask_2)
def count_motif_12_2(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_in_timestamp = g.edata['timestamp'][g.in_edges(src, form='eid')]
src_in_ngr = g.in_edges(src)[0]
dst_in_timestamp = g.edata['timestamp'][g.in_edges(dst, form='eid')]
dst_in_ngr = g.in_edges(dst)[0]
if len(src_in_ngr) == 0 or len(dst_in_ngr) == 0:
return
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_in_timestamp >= left_margin_1, src_in_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_in_ngr != dst)
src_in_ngr_1, src_in_timestamp_1 = src_in_ngr[mask_1], src_in_timestamp[mask_1]
for i, node_i in enumerate(src_in_ngr_1):
left_margin_3, right_margin_3 = pivot, src_in_timestamp_1[i] + threshold_time
mask_3 = torch.logical_and(dst_in_timestamp > left_margin_3, dst_in_timestamp <= right_margin_3)
mask_3 = torch.logical_and(mask_3, dst_in_ngr == node_i)
g.edata['motif_count'][eid][34] += torch.sum(mask_3)
def count_motif_12_3(g, threshold_time, eid):
src, dst = g.find_edges(eid)
src_out_timestamp = g.edata['timestamp'][g.out_edges(src, form='eid')]
src_out_ngr = g.out_edges(src)[1]
dst_in_timestamp = g.edata['timestamp'][g.in_edges(dst, form='eid')]
dst_in_ngr = g.in_edges(dst)[0]
if len(src_out_ngr) == 0 or len(dst_in_ngr) == 0:
return
pivot = g.edata['timestamp'][eid]
left_margin_1, right_margin_1 = pivot - threshold_time, pivot
mask_1 = torch.logical_and(src_out_timestamp >= left_margin_1, src_out_timestamp < right_margin_1)
mask_1 = torch.logical_and(mask_1, src_out_ngr != dst)
src_out_ngr_1, src_out_timestamp_1 = src_out_ngr[mask_1], src_out_timestamp[mask_1]
for i, node_i in enumerate(src_out_ngr_1):
left_margin_2, right_margin_2 = src_out_timestamp_1[i], pivot
mask_2 = torch.logical_and(dst_in_timestamp > left_margin_2, dst_in_timestamp < right_margin_2)
mask_2 = torch.logical_and(mask_2, dst_in_ngr == node_i)
g.edata['motif_count'][eid][35] += torch.sum(mask_2)
def count_homogeneous_graph(g, threshold_time):
func_ls = [count_motif_1_1, count_motif_1_2, count_motif_1_3,
count_motif_2_1, count_motif_2_2, count_motif_2_3,
count_motif_3_1, count_motif_3_2, count_motif_3_3,
count_motif_4_1, count_motif_4_2, count_motif_4_3,
count_motif_5_1, count_motif_5_2, count_motif_5_3,
count_motif_6_1, count_motif_6_2, count_motif_6_3,
count_motif_7_1, count_motif_7_2, count_motif_7_3,
count_motif_8_1, count_motif_8_2, count_motif_8_3,
count_motif_9_1, count_motif_9_2, count_motif_9_3,
count_motif_10_1, count_motif_10_2, count_motif_10_3,
count_motif_11_1, count_motif_11_2, count_motif_11_3,
count_motif_12_1, count_motif_12_2, count_motif_12_3]
g.edata['motif_count'] = torch.zeros(g.number_of_edges(), 36)
for eid in tqdm(range(g.num_edges())):
for f in func_ls:
f(g, threshold_time, eid)
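# Hedged usage sketch (not part of the original pipeline): build a tiny DGL graph with
# per-edge 'timestamp' features and run the counter on it. The node ids, timestamps and
# the threshold below are illustrative assumptions; torch, tqdm and dgl are assumed to be
# importable as in the rest of this module.
if __name__ == '__main__':
    import dgl
    toy_g = dgl.graph((torch.tensor([0, 1, 0, 2]), torch.tensor([1, 0, 2, 0])))
    toy_g.edata['timestamp'] = torch.tensor([1.0, 2.0, 3.0, 4.0])
    count_homogeneous_graph(toy_g, threshold_time=5.0)
    print(toy_g.edata['motif_count'])  # (num_edges, 36) tensor of temporal motif counts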
|
python
|
# coding: utf8
from pycropml.transpiler.main import Main
source = u"""def test(int a):
cdef list g=[14,15,12,12]
cdef int i
a=12
for i in g:
a=a+i
return a
"""
output_cs=u"""using System;
using System.Collections.Generic;
public class Program
{
static int test(int a)
{
List<int> g= new List<int>{14, 15, 12, 12};
int i;
a = 12;
foreach(int i in g)
{
a = a + i;
}
return a;
}
}"""
output_py=u"""def test(a):
g = [14, 15, 12, 12]
a = 12
for i in g:
a = a + i
return a"""
languages=["py","cs"]
output={"py":output_py, "cs":output_cs}
def test_for_statement():
for lang in languages:
test=Main(source, lang)
test.parse()
test.to_ast(source)
code=test.to_source()
print(code)
assert(code==output[lang])
if __name__=='__main__':
test_for_statement()
|
python
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import posixpath
import sys
try:
import pycurl
# pycurl is not necessary for testcases, mock it
except ImportError:
from mock.mock import MagicMock
pycurl = MagicMock()
import cStringIO
import StringIO
import pdb
try:
import json
except ImportError:
import simplejson as json
from ambari_client.core.http_utils import uri_encoding
__docformat__ = "epytext"
LOG = logging.getLogger(__name__)
class HttpClient(object):
"""
    Basic HTTP client for REST APIs.
"""
def __init__(self, host_url, user_name , password ):
"""
@param host_url: The base url to the API.
"""
self._host_url = host_url.rstrip('/')
self._headers = { }
self.c = pycurl.Curl()
if user_name is not None:
self.c.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
userpass = user_name + ':'
if password is not None:
userpass += password
LOG.debug( "pycurl.USERPWD value = "+str(userpass))
self.c.setopt(pycurl.USERPWD, userpass)
def set_headers(self, headers):
"""
Add headers to the request
"""
self._headers = headers
return self
@property
def host_url(self):
return self._host_url
def _get_headers(self, headers):
res = self._headers.copy()
if headers:
res.update(headers)
return res
def invoke(self, http_method, path, payload=None, headers=None):
"""
Submit an HTTP request.
@param http_method: GET, POST, PUT, DELETE
@param path: The path of the resource.
@param payload: The payload to attach to the body of the request.
@param headers: The headers to set for this request.
@return: The result of REST request
"""
#pdb.set_trace()
LOG.debug ("invoke : http_method = "+str(http_method))
# Prepare URL and params
url = self._normalize(path)
if http_method in ("GET", "DELETE"):
if payload is not None:
                LOG.warn(
                    "GET http_method does not pass any payload. Path '%s'" % (path,))
payload = None
buf = cStringIO.StringIO()
self.c.setopt(pycurl.WRITEFUNCTION, buf.write)
self.c.setopt(pycurl.SSL_VERIFYPEER, 0)
LOG.debug ("invoke : url = "+str(url))
# set http_method
if http_method == "GET":
self.c.setopt(pycurl.HTTPGET, 1)
elif http_method == "HEAD":
self.c.setopt(pycurl.HTTPGET, 1)
self.c.setopt(pycurl.NOBODY, 1)
elif http_method == "POST":
self.c.setopt(pycurl.POST, 1)
elif http_method == "PUT":
self.c.setopt(pycurl.UPLOAD, 1)
else:
self.c.setopt(pycurl.CUSTOMREQUEST, http_method)
if http_method in ('POST','PUT'):
LOG.debug( "data..........."+str(payload))
data = json.dumps(payload)
data= data.decode('unicode-escape')
LOG.debug( data)
data = self._to_bytestring(data)
LOG.debug( data)
content = StringIO.StringIO(data)
LOG.debug( content)
content_length = len(data)
LOG.debug( "content_length........."+str(content_length))
if http_method == 'POST':
self.c.setopt(pycurl.POSTFIELDSIZE, content_length)
else:
self.c.setopt(pycurl.INFILESIZE, content_length)
self.c.setopt(pycurl.READFUNCTION, content.read)
self.c.setopt(self.c.URL, url)
headers = self._get_headers(headers)
self.c.setopt(pycurl.HTTPHEADER,
["%s: %s" % pair for pair in sorted(headers.iteritems())])
LOG.debug ("invoke : pycurl.EFFECTIVE_URL = "+self.c.getinfo(pycurl.EFFECTIVE_URL))
try:
self.c.perform()
except Exception, ex:
LOG.debug (sys.stderr, str(ex))
raise ex
contents_type= self.c.getinfo(pycurl.CONTENT_TYPE)
LOG.debug ("invoke : pycurl.CONTENT_TYPE = "+contents_type)
code = self.c.getinfo(pycurl.RESPONSE_CODE)
LOG.debug ("invoke : pycurl.RESPONSE_CODE = "+str(code))
response = buf.getvalue()
buf.close()
LOG.debug ("invoke : COMPLETED ")
return response , code , contents_type
def _to_bytestring(self ,s):
# if not isinstance(s, basestring):
# raise TypeError("value should be a str or unicode")
if isinstance(s, unicode):
return s.encode('utf-8')
return s
def _normalize(self, path):
res = self._host_url
if path:
res += posixpath.normpath('/' + path.lstrip('/'))
return uri_encoding(res)
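# Hedged usage sketch (illustrative only): the host url, the credentials and the
# '/clusters' path below are assumptions for demonstration, not values shipped with
# this module.
if __name__ == "__main__":
    client = HttpClient("http://ambari-host:8080/api/v1", "admin", "admin")
    client.set_headers({"X-Requested-By": "ambari", "Content-Type": "application/json"})
    response, code, content_type = client.invoke("GET", "/clusters")
    print("status=%s content_type=%s" % (code, content_type))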
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-10-16 07:07
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('cart', '0005_auto_20180111_0700'),
]
operations = [
migrations.AddField(
model_name='cartline',
name='package_offer_data',
field=jsonfield.fields.JSONField(blank=True, default={}, verbose_name='package_offer_data'),
),
]
|
python
|
from tkinter import *
import sys
# Medicine Menu
def mclickedbtn1():
print("Hello")
def mclickedbtn2():
print("Hello")
def mclickedbtn3():
print("Hello")
def mclickedbtn4():
print("Hello")
def clickedbtn1():
medicine_menu_window = Tk()
medicine_menu_window.geometry('350x200')
medicine_menu_window.title("Pharmacy Management Software")
lbl = Label(medicine_menu_window, text="Medicine Menu!")
lbl.grid(column=0, row=0)
lbl2 = Label(medicine_menu_window, text="What would you like to do!")
lbl2.grid(column=0, row=1)
btn1 = Button(medicine_menu_window, text="Add New Medicine",fg="red", command=mclickedbtn1)
btn1.grid(column=0, row=2)
btn2 = Button(medicine_menu_window, text="Search Medicine",fg="red", command=mclickedbtn2)
btn2.grid(column=0, row=3)
btn3 = Button(medicine_menu_window, text="Update Medicine",fg="red", command=mclickedbtn3)
btn3.grid(column=0, row=4)
btn4 = Button(medicine_menu_window, text="Medicines to be purchased",fg="red", command=mclickedbtn4)
btn4.grid(column=0, row=5)
btn4 = Button(medicine_menu_window, text="Return to main menu",fg="red", command=mclickedbtn4)
btn4.grid(column=0, row=6)
medicine_menu_window.mainloop()
#Customer Menu
def clickedbtn2():
c_menu_window = Tk()
c_menu_window.geometry('350x200')
c_menu_window.title("Pharmacy Management Software")
lbl = Label(c_menu_window, text="Customer Menu!")
lbl.grid(column=0, row=0)
lbl2 = Label(c_menu_window, text="What would you like to do!")
lbl2.grid(column=0, row=1)
btn1 = Button(c_menu_window, text="Search Customer",fg="red", command=mclickedbtn1)
btn1.grid(column=0, row=2)
btn2 = Button(c_menu_window, text="New Customer",fg="red", command=mclickedbtn2)
btn2.grid(column=0, row=3)
btn3 = Button(c_menu_window, text="Update Customer Info",fg="red", command=mclickedbtn3)
btn3.grid(column=0, row=4)
btn4 = Button(c_menu_window, text="Return to main menu",fg="red", command=mclickedbtn4)
btn4.grid(column=0, row=5)
c_menu_window.mainloop()
#Supplier Menu
def clickedbtn3():
s_menu_window = Tk()
s_menu_window.geometry('350x200')
s_menu_window.title("Pharmacy Management Software")
lbl = Label(s_menu_window, text="Supplier Menu!")
lbl.grid(column=0, row=0)
lbl2 = Label(s_menu_window, text="What would you like to do!")
lbl2.grid(column=0, row=1)
btn1 = Button(s_menu_window, text="Search Supplier",fg="red", command=mclickedbtn1)
btn1.grid(column=0, row=2)
btn2 = Button(s_menu_window, text="New Supplier",fg="red", command=mclickedbtn2)
btn2.grid(column=0, row=3)
btn3 = Button(s_menu_window, text="Update Supplier Info",fg="red", command=mclickedbtn3)
btn3.grid(column=0, row=4)
btn4 = Button(s_menu_window, text="Return to main menu",fg="red", command=mclickedbtn4)
btn4.grid(column=0, row=5)
s_menu_window.mainloop()
#Report Menu
def clickedbtn4():
r_menu_window = Tk()
r_menu_window.geometry('350x200')
r_menu_window.title("Pharmacy Management Software")
lbl = Label(r_menu_window, text="Supplier Menu!")
lbl.grid(column=0, row=0)
lbl2 = Label(r_menu_window, text="What would you like to do!")
lbl2.grid(column=0, row=1)
btn1 = Button(r_menu_window, text="Day Sales",fg="red", command=mclickedbtn1)
btn1.grid(column=0, row=2)
btn2 = Button(r_menu_window, text="Month Sales",fg="red", command=mclickedbtn2)
btn2.grid(column=0, row=3)
btn3 = Button(r_menu_window, text="Day Purchase",fg="red", command=mclickedbtn3)
btn3.grid(column=0, row=4)
btn3 = Button(r_menu_window, text="Month Purchase",fg="red", command=mclickedbtn3)
btn3.grid(column=0, row=5)
btn3 = Button(r_menu_window, text="Profit Report",fg="red", command=mclickedbtn3)
btn3.grid(column=0, row=6)
btn4 = Button(r_menu_window, text="Return to main menu",fg="red", command=mclickedbtn4)
btn4.grid(column=0, row=7)
r_menu_window.mainloop()
#Invoicing Menu
def clickedbtn5():
r_menu_window = Tk()
r_menu_window.geometry('350x200')
r_menu_window.title("Pharmacy Management Software")
lbl = Label(r_menu_window, text="Invoice Menu!")
lbl.grid(column=0, row=0)
lbl2 = Label(r_menu_window, text="What would you like to do!")
lbl2.grid(column=0, row=1)
btn1 = Button(r_menu_window, text="Supplier Invoice",fg="red", command=mclickedbtn1)
btn1.grid(column=0, row=2)
btn2 = Button(r_menu_window, text="Customer Invoice",fg="red", command=mclickedbtn2)
btn2.grid(column=0, row=3)
btn4 = Button(r_menu_window, text="Return to main menu",fg="red", command=mclickedbtn4)
btn4.grid(column=0, row=4)
r_menu_window.mainloop()
#Main Menu
window = Tk()
window.geometry('350x200')
window.title("Pharmacy Management Software")
lbl = Label(window, text="Welcome to Pharmacy Management Software!")
lbl.grid(column=0, row=0)
lbl2 = Label(window, text="What would you like to do!")
lbl2.grid(column=0, row=1)
btn1 = Button(window, text="Medicine Menu",fg="red", command=clickedbtn1)
btn1.grid(column=0, row=2)
btn2 = Button(window, text="Customer Menu",fg="red", command=clickedbtn2)
btn2.grid(column=0, row=3)
btn3 = Button(window, text="Supplier Menu",fg="red", command=clickedbtn3)
btn3.grid(column=0, row=4)
btn4 = Button(window, text="Report Menu",fg="red", command=clickedbtn4)
btn4.grid(column=0, row=5)
btn5 = Button(window, text="Invoicing Menu",fg="red", command=clickedbtn5)
btn5.grid(column=0, row=6)
window.mainloop()
|
python
|
# coding: utf-8
# # Explore offset vector
#
# We want to know what the offset vector is capturing. Theoretically it should be capturing the "essence of gene A" since it is defined by taking the samples with the highest expression of gene A and the lowest expression of gene A.
#
# We want to test if this offset vector is capturing genes in group A and B
# In[1]:
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib_venn import venn2
randomState = 123
from numpy.random import seed
seed(randomState)
# In[2]:
# Load data
base_dir = os.path.join(os.path.dirname(os.getcwd()), "data")
analysis_name = "sim_AB_2775_300_v2"
offset_gene_file = os.path.join(os.path.dirname(os.getcwd()), "data", analysis_name, "offset_gene_space.txt")
offset_vae_file = os.path.join(os.path.dirname(os.getcwd()), "encoded", analysis_name, "offset_latent_space_vae.txt")
A_file = os.path.join(base_dir, analysis_name, "geneSetA.txt")
B_file = os.path.join(base_dir, analysis_name, "geneSetB.txt")
weight_file = os.path.join(os.path.dirname(os.getcwd()), "data", analysis_name, "VAE_weight_matrix.txt")
# In[3]:
# Read gene space offset
offset_gene_space = pd.read_table(offset_gene_file, header=0, index_col=0)
offset_gene_space
# In[4]:
# Read VAE space offset
offset_vae_space = pd.read_table(offset_vae_file, header=0, index_col=0)
offset_vae_space
# In[5]:
# Read genes in set A
geneSetA = pd.read_table(A_file, header=0, index_col=0)
geneSetA_ls = [l[0] for l in geneSetA.values.tolist()]
geneSetA_set = set(geneSetA_ls)
# In[6]:
# Read genes in set B
geneSetB = pd.read_table(B_file, header=0, index_col=0)
geneSetB_ls = [l[0] for l in geneSetB.values.tolist()]
geneSetB_set = set(geneSetB_ls)
# In[7]:
# Read weight matrix
weight = pd.read_table(weight_file, header=0, index_col=0).T
weight.head(5)
# ## Explore gene space offset
#
# 1. What genes are most highly weighted?
# 2. What percentage of these genes are in gene set A and B?
# In[8]:
# Distribution of weights in offset vector
sns.distplot(offset_gene_space)
# In[9]:
# Get gene ids with the highest weight from the offset vector
percentile = 95
threshold = np.percentile(offset_gene_space, percentile)
print("Threshold cutoff is {}".format(threshold))
highest_genes = offset_gene_space.T[offset_gene_space.T[0] > threshold].index
# In[10]:
# Compare the overlap of genes in set A and highest weighted genes in offset
venn2([set(highest_genes), geneSetA_set], set_labels = ('High weight offset genes', 'Group A genes'))
plt.show()
# In[11]:
# Compare the overlap of genes in set B and highest weighted genes in offset
venn2([set(highest_genes), geneSetB_set], set_labels = ('High weight offset genes', 'Group B genes'))
plt.show()
# ## Explore latent space (VAE) offset
# 1. Which feature has the highest value?
# 2. Are genes in set A and B highly weighted?
# In[12]:
# Distribution of weights in offset vector
sns.distplot(offset_vae_space)
# In[13]:
# Get latent feature with the max and min value
max_feature = offset_vae_space.T.idxmax()[0]
min_feature = offset_vae_space.T.idxmin()[0]
print("Max feature is {} and min feature is {}".format(max_feature, min_feature))
# ### Genes in feature that corresponds to max offset score
# In[14]:
# Get gene weights for max latent feature
genes_max_feature = weight[int(max_feature)]
sns.distplot(genes_max_feature)
# In[15]:
# Get gene ids with the highest positive weight from the max feature selected
percentile = 95
threshold = np.percentile(genes_max_feature, percentile)
print("Threshold cutoff is {}".format(threshold))
highest_genes = genes_max_feature[genes_max_feature > threshold].index
# In[16]:
# Get gene ids with the highest negative weight from the max feature selected
percentile = 5
threshold = np.percentile(genes_max_feature, percentile)
print("Threshold cutoff is {}".format(threshold))
lowest_genes = genes_max_feature[genes_max_feature < threshold].index
# In[17]:
# Compare the overlap of genes in set A and highest positive weighted genes in the max feature
venn2([set(highest_genes), geneSetA_set], set_labels = ('High positive weight genes in feature {}'.format(max_feature), 'Group A genes'))
plt.show()
# In[18]:
# Output intersected sets
intersect_highpos_geneA = geneSetA_set.intersection(set(highest_genes))
intersect_highpos_geneA_df = pd.DataFrame(list(intersect_highpos_geneA), columns=['gene id'])
intersect_file = os.path.join(base_dir, analysis_name, "intersect_feature{}_highpos_geneA.txt".format(max_feature))
intersect_highpos_geneA_df.to_csv(intersect_file, sep='\t')
# In[19]:
# Compare the overlap of genes in set B and highest positive weighted genes in the max feature
venn2([set(highest_genes), geneSetB_set], set_labels = ('High positive weight genes in feature {}'.format(max_feature), 'Group B genes'))
plt.show()
# In[20]:
# Output intersected sets
intersect_highpos_geneB = geneSetB_set.intersection(set(highest_genes))
intersect_highpos_geneB_df = pd.DataFrame(list(intersect_highpos_geneB), columns=['gene id'])
intersect_file = os.path.join(base_dir, analysis_name, "intersect_feature{}_highpos_geneB.txt".format(max_feature))
intersect_highpos_geneB_df.to_csv(intersect_file, sep='\t')
# In[21]:
# Compare the overlap of genes in set A and highest negative weighted genes in the max feature
venn2([set(lowest_genes), geneSetA_set], set_labels = ('High negative weight genes in feature {}'.format(max_feature), 'Group A genes'))
plt.show()
# In[22]:
# Compare the overlap of genes in set B and highest negative weighted genes in the max feature
venn2([set(lowest_genes), geneSetB_set], set_labels = ('High negative weight genes in feature {}'.format(max_feature), 'Group B genes'))
plt.show()
# In[23]:
# Output intersected sets
intersect_highneg_geneB = geneSetB_set.intersection(set(lowest_genes))
intersect_highneg_geneB_df = pd.DataFrame(list(intersect_highneg_geneB), columns=['gene id'])
intersect_file = os.path.join(base_dir, analysis_name, "intersect_feature{}_highneg_geneB.txt".format(max_feature))
intersect_highneg_geneB_df.to_csv(intersect_file, sep='\t')
# ### Genes in feature that corresponds to minimum offset score
# In[24]:
# Get gene weights for min latent feature
genes_min_feature = weight[int(min_feature)]
sns.distplot(genes_min_feature)
# In[25]:
# Get gene ids with the highest positive weight from the min feature selected
percentile = 95
threshold = np.percentile(genes_min_feature, percentile)
print("Threshold cutoff is {}".format(threshold))
highest_genes = genes_min_feature[genes_min_feature > threshold].index
# In[26]:
# Get gene ids with the highest negative weight from the min feature selected
percentile = 5
threshold = np.percentile(genes_min_feature, percentile)
print("Threshold cutoff is {}".format(threshold))
lowest_genes = genes_min_feature[genes_min_feature < threshold].index
# In[27]:
# Compare the overlap of genes in set A and highest positive weighted genes in the min feature
venn2([set(highest_genes), geneSetA_set], set_labels = ('High positive weight genes in feature {}'.format(min_feature), 'Group A genes'))
plt.show()
# In[28]:
# Compare the overlap of genes in set B and highest positive weighted genes in the min feature
venn2([set(highest_genes), geneSetB_set], set_labels = ('High positive weight genes in feature {}'.format(min_feature), 'Group B genes'))
plt.show()
# In[29]:
# Output intersected sets
intersect_highpos_geneB = geneSetB_set.intersection(set(highest_genes))
intersect_highpos_geneB_df = pd.DataFrame(list(intersect_highpos_geneB), columns=['gene id'])
intersect_file = os.path.join(base_dir, analysis_name, "intersect_feature{}_highpos_geneB.txt".format(min_feature))
intersect_highpos_geneB_df.to_csv(intersect_file, sep='\t')
# In[30]:
# Compare the overlap of genes in set A and highest negative weighted genes in the min feature
venn2([set(lowest_genes), geneSetA_set], set_labels = ('High negative weight genes in feature {}'.format(min_feature), 'Group A genes'))
plt.show()
# In[31]:
# Output intersected sets
intersect_highneg_geneA = geneSetA_set.intersection(set(lowest_genes))
intersect_highneg_geneA_df = pd.DataFrame(list(intersect_highneg_geneA), columns=['gene id'])
intersect_file = os.path.join(base_dir, analysis_name, "intersect_feature{}_highneg_geneA.txt".format(min_feature))
intersect_highneg_geneA_df.to_csv(intersect_file, sep='\t')
# In[32]:
# Compare the overlap of genes in set B and highest negative weighted genes in the min feature
venn2([set(lowest_genes), geneSetB_set], set_labels = ('High negative weight genes in feature {}'.format(min_feature), 'Group B genes'))
plt.show()
# In[33]:
# Output intersected sets
intersect_highneg_geneB = geneSetB_set.intersection(set(lowest_genes))
intersect_highneg_geneB_df = pd.DataFrame(list(intersect_highneg_geneB), columns=['gene id'])
intersect_file = os.path.join(base_dir, analysis_name, "intersect_feature{}_highneg_geneB.txt".format(min_feature))
intersect_highneg_geneB_df.to_csv(intersect_file, sep='\t')
# Observation:
#
# Notice that the high weight genes in the min feature and the max feature overlap heavily -- why is this?
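# In[34]:
# Hedged follow-up sketch (not in the original notebook): quantify the similarity noted
# above by computing the Jaccard overlap of the high-weight gene sets of the max and min
# features, using the same 95th percentile cutoff as before.
top_max = set(genes_max_feature[genes_max_feature > np.percentile(genes_max_feature, 95)].index)
top_min = set(genes_min_feature[genes_min_feature > np.percentile(genes_min_feature, 95)].index)
jaccard = len(top_max & top_min) / float(len(top_max | top_min))
print("Jaccard overlap of high weight genes in features {} and {}: {:.2f}".format(max_feature, min_feature, jaccard))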
|
python
|
import os
import platform
import click
import kungfu.yijinjing.journal as kfj
import pyyjj
@click.group(invoke_without_command=True)
@click.option('-H', '--home', type=str, help="kungfu home folder, defaults to APPDATA/kungfu/app, where APPDATA defaults to %APPDATA% on windows, "
"~/.config or $XDG_CONFIG_HOME (if set) on linux, ~/Library/Application Support on mac")
@click.option('-l', '--log_level', type=click.Choice(['trace', 'debug', 'info', 'warning', 'error', 'critical']),
default='warning', help='logging level')
@click.option('-n', '--name', type=str, help='name for the process, defaults to command if not set')
@click.pass_context
def kfc(ctx, home, log_level, name):
if not home:
osname = platform.system()
user_home = os.path.expanduser('~')
if osname == 'Linux':
xdg_config_home = os.getenv('XDG_CONFIG_HOME')
home = xdg_config_home if xdg_config_home else os.path.join(user_home, '.config')
if osname == 'Darwin':
home = os.path.join(user_home, 'Library', 'Application Support')
if osname == 'Windows':
home = os.getenv('APPDATA')
home = os.path.join(home, 'kungfu', 'app')
os.environ['KF_HOME'] = ctx.home = home
os.environ['KF_LOG_LEVEL'] = ctx.log_level = log_level
# have to keep locator alive from python side
# https://github.com/pybind/pybind11/issues/1546
ctx.locator = kfj.Locator(home)
ctx.system_config_location = pyyjj.location(pyyjj.mode.LIVE, pyyjj.category.SYSTEM, 'etc', 'kungfu', ctx.locator)
if ctx.invoked_subcommand is None:
click.echo(kfc.get_help(ctx))
else:
ctx.name = name if name else ctx.invoked_subcommand
pass
def pass_ctx_from_parent(ctx):
ctx.home = ctx.parent.home
ctx.log_level = ctx.parent.log_level
ctx.locator = ctx.parent.locator
ctx.system_config_location = ctx.parent.system_config_location
ctx.name = ctx.parent.name
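# Hedged sketch (illustrative only): how a subcommand could reuse the parent context
# prepared above. The command name 'showenv' is a hypothetical example, not part of
# the real kungfu CLI.
@kfc.command()
@click.pass_context
def showenv(ctx):
    pass_ctx_from_parent(ctx)
    click.echo('KF_HOME=%s log_level=%s name=%s' % (ctx.home, ctx.log_level, ctx.name))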
def execute():
kfc(auto_envvar_prefix='KF')
|
python
|
#!/usr/bin/env python
# coding: utf-8
# # Gaussian Mixture Model
#
# GMM runs on 5 features by default: beam, gate, time, velocity, and spectral
# width. It performs well overall, even on clusters that are not well-separated
# in space and time. However, it will often create clusters that are too high
# variance, causing it to pull in scattered points that do not look like they
# should be clustered together - see the fanplots in cluster.ipynb. It is also
# slow, taking 5-10 minutes for one day of data.
#
# Parameters for each algorithm are set to default values (shown below), but can
# be modified using the class constructor.
#
# ### Optional arguments for GMM class constructor
#
# n_clusters=30
# The number of GMM clusters to create.
#
# cov='full'
# The covariance matrix to use for GMM.
# See this post for more details:
# https://stats.stackexchange.com/questions/326671/different-covariance-types-for-gaussian-mixture-models
#
# features=['beam', 'gate', 'time', 'vel', 'wid']
# Names of the features for GMM to run on. Can also include 'elv'.
#
# BoxCox=False
# If BoxCox=True, 'wid' and 'vel' will be BoxCox transformed to
# convert them from an exponential distribution to a Gaussian.
# In[1]:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys
sys.path.insert(0, '..')
from algorithms.gmm import GMM
import datetime
import numpy as np
import itertools
start_time = datetime.datetime(2017, 4, 4)
end_time = datetime.datetime(2017, 4, 5)
gmm = GMM(start_time, end_time, 'cvw', cov='full', n_clusters=10, BoxCox=True, load_model=False, save_model=False)
print(gmm.runtime)
# In[2]:
#get_ipython().run_line_magic('matplotlib', 'inline')
# Make RTI plots to compare AJ's threshold with traditional threshold
#gmm.plot_rti(14, 'Ribiero') # Slooow
# Make fanplots of the individual clusters over some time period
#fanplot_start = datetime.datetime(2017, 4, 4, 4, 0, 0)
#fanplot_end = datetime.datetime(2017, 4, 4, 4, 0, 0)
#gmm.plot_fanplots(fanplot_start, fanplot_end)
# In[ ]:
#lowest_bic = np.infty
#bic = []
#n_components_range = range(1, 31)
#cv_types = ['spherical', 'full']
#for cv_type in cv_types:
# for n_components in n_components_range:
# # Fit a Gaussian mixture with EM
# gmm = GMM(start_time, end_time, 'cvw', cov=cv_type, n_clusters=n_components, BoxCox=True, load_model=False, save_model=False)
# bic.append(gmm._bic)
# if bic[-1] < lowest_bic:
# lowest_bic = bic[-1]
# best_gmm = gmm
#bic = np.array(bic)
#color_iter = itertools.cycle(['navy','darkorange'])
#clf = best_gmm
#bars = []
# Plot the BIC scores
#plt.figure(figsize=(5, 3))
#spl = plt.subplot(1, 1, 1)
#for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
# xpos = np.array(n_components_range) + .2 * (i - 2)
# bars.append(plt.bar(xpos, bic[i * len(n_components_range):
# (i + 1) * len(n_components_range)],
# width=.2, color=color))
#plt.xticks(n_components_range[::3])
#plt.yscale("log")
#plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
#plt.title('BIC score per model')
#xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
# .2 * np.floor(bic.argmin() / len(n_components_range))
#plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
#spl.set_xlabel('Number of components')
#spl.legend([b[0] for b in bars], cv_types)
#plt.savefig("../plots/bic.png", bbox_inches="tight")
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-05 07:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('projects', '0055_merge_20171205_0847'),
]
operations = [
migrations.CreateModel(
name='CustomProjectField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(blank=True, max_length=5000, null=True)),
],
),
migrations.CreateModel(
name='CustomProjectFieldSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('description', models.CharField(blank=True, max_length=200, null=True)),
('sequence', models.PositiveIntegerField(db_index=True, default=0, editable=False)),
('project_settings', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='extra_fields', to='projects.ProjectPlatformSettings')),
],
options={
'ordering': ['sequence'],
},
),
migrations.AlterField(
model_name='projectsearchfilter',
name='name',
field=models.CharField(choices=[(b'location', 'Location'), (b'theme', 'Theme'), (b'skills', 'Skill'), (b'date', 'Date'), (b'status', 'Status'), (b'type', 'Type'), (b'category', 'Category')], max_length=100),
),
migrations.AddField(
model_name='customprojectfield',
name='field',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.CustomProjectFieldSettings'),
),
migrations.AddField(
model_name='customprojectfield',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project', verbose_name=b'extra'),
),
]
|
python
|
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["AuditEventSourceType"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class AuditEventSourceType:
"""
Audit Event Source Type
The type of process where the audit event originated from.
Status: active - Version: 4.0.1
Copyright None
http://terminology.hl7.org/CodeSystem/security-source-type
"""
one = CodeSystemConcept(
{
"code": "1",
"definition": "End-user display device, diagnostic device.",
"display": "User Device",
}
)
"""
User Device
End-user display device, diagnostic device.
"""
two = CodeSystemConcept(
{
"code": "2",
"definition": "Data acquisition device or instrument.",
"display": "Data Interface",
}
)
"""
Data Interface
Data acquisition device or instrument.
"""
three = CodeSystemConcept(
{
"code": "3",
"definition": "Web Server process or thread.",
"display": "Web Server",
}
)
"""
Web Server
Web Server process or thread.
"""
four = CodeSystemConcept(
{
"code": "4",
"definition": "Application Server process or thread.",
"display": "Application Server",
}
)
"""
Application Server
Application Server process or thread.
"""
five = CodeSystemConcept(
{
"code": "5",
"definition": "Database Server process or thread.",
"display": "Database Server",
}
)
"""
Database Server
Database Server process or thread.
"""
six = CodeSystemConcept(
{
"code": "6",
"definition": "Security server, e.g. a domain controller.",
"display": "Security Server",
}
)
"""
Security Server
Security server, e.g. a domain controller.
"""
seven = CodeSystemConcept(
{
"code": "7",
"definition": "ISO level 1-3 network component.",
"display": "Network Device",
}
)
"""
Network Device
ISO level 1-3 network component.
"""
eight = CodeSystemConcept(
{
"code": "8",
"definition": "ISO level 4-6 operating software.",
"display": "Network Router",
}
)
"""
Network Router
ISO level 4-6 operating software.
"""
nine = CodeSystemConcept(
{
"code": "9",
"definition": "Other kind of device (defined by DICOM, but some other code/system can be used).",
"display": "Other",
}
)
"""
Other
Other kind of device (defined by DICOM, but some other code/system can be used).
"""
class Meta:
resource = _resource
|
python
|
from fastapi_tag.base import model
__all__ = ["model"]
|
python
|
#!/usr/bin/env python
"""
plot energy usage by PM jobs
"""
__author__ = "Jan Balewski"
__email__ = "[email protected]"
import numpy as np
import time
from pprint import pprint
from toolbox.Plotter_Backbone import Plotter_Backbone
from toolbox.Util_IOfunc import read_one_csv
from toolbox.Util_Misc import smoothF
import argparse
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-v","--verbosity",type=int,choices=[0, 1, 2],help="increase output verbosity", default=1, dest='verb')
parser.add_argument("-j", "--jobId",default='65244', help=" job ID")
parser.add_argument("-o", "--outPath", default='out/',help="output path for plots and tables")
parser.add_argument( "--smoothWindow", default=10, type=int,help=" smooth the data using a window with requested size (bins)")
parser.add_argument( "-X","--noXterm", dest='noXterm', action='store_true', default=False, help="disable X-term for batch mode")
args = parser.parse_args()
args.prjName='cosmoHpo'
#PM
args.sourcePath='/pscratch/sd/b/balewski/tmp_digitalMind/neuInv/benchmark/september/'
args.formatVenue='prod'
for arg in vars(args): print( 'myArg:',arg, getattr(args, arg))
return args
#...!...!....................
def ana_one_job(jobId,table):
tL=[]; eL={'node_ene_J':[],'cpu_ene_J':[],'memory_ene_J':[]}
for k in range(4): eL['gpu%d_ene_J'%k]=[]
for rec in table:
#print('rec',rec)
t=float(rec['unix_millisec'])/1000.
tL.append(t)
for x in eL: eL[x].append( float(rec[x]))
#print('t',t,'eL',eL)
N=len(tL)
eL['4gpu_energy']=[]
    # sum GPU energy across the four GPUs
for i in range(N):
sum=0
for k in range(4): sum+=eL['gpu%d_ene_J'%k][i]
eL['4gpu_energy'].append(sum)
#... convert list to NP arrays
for x in eL: eL[x]=np.array(eL[x])
if args.smoothWindow>0:
for x in eL: eL[x]=smoothF(eL[x],args.smoothWindow)
#..... convert energy to power
tL=np.array(tL)
tL-=tL[0]
pL={x:[0] for x in eL}
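    # discrete derivative: power[i] ~= (E[i] - E[i-1]) / (t[i] - t[i-1]);
    # the energy counters are in joules and time is in seconds, so this yields watts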
for i in range(1,N):
dt=tL[i]-tL[i-1]
#print(i,dt)
for x in eL: pL[x].append( (eL[x][i]- eL[x][i-1])/dt)
eT={}
for x in eL:
eT[x]= eL[x][-1]- eL[x][0]
elaT=tL[-1]-tL[0]
outD={'elaT':elaT,'tot_ene':eT,'jobId':jobId,'hostname':rec['hostname']}
pprint(outD)
outD['power']=pL
outD['time']=tL
return outD
#............................
#............................
#............................
class Plotter_EnergyUse(Plotter_Backbone):
def __init__(self, args):
Plotter_Backbone.__init__(self,args)
#...!...!....................
def one_job(self,jobD,figId=5):
nrow,ncol=1,1
        # grid is (yN, xN) - y=0 is at the top
figId=self.smart_append(figId)
self.plt.figure(figId,facecolor='white', figsize=(10,6))
ax=self.plt.subplot(nrow,ncol,1)
tit='jobId=%s, node=%s'%(jobD['jobId'],jobD['hostname'])
T=jobD['time']
#for k in range(1,4): jobD['power'].pop('gpu%d_ene_J'%k)
for name in jobD['power']:
Y=jobD['power'][name]
ene=jobD['tot_ene'][name] /3600.
dLab='%s: %.1f'%(name,ene)
#print(T,Y)
ax.plot(T,Y,label=dLab)
ax.legend(loc='best', title='total used energy: (Wh)')
ax.set(xlabel='wall time (sec)',ylabel='power (W)', title=tit)
ax.grid(True)
return
#if j==0: ax.text(0.1,0.85,'n=%d'%len(lossV),transform=ax.transAxes)
#=================================
#=================================
# M A I N
#=================================
#=================================
args=get_parser()
stockD={}
jobId=args.jobId
inpF=args.sourcePath+'%s/log.energy_%s.csv'%(jobId,jobId)
table,label=read_one_csv(inpF)
jobD=ana_one_job(jobId,table)
plot=Plotter_EnergyUse(args)
plot.one_job(jobD)
plot.display_all('aa')
|
python
|
# -*- coding: utf-8 -*-
# =============================================================================
# MIT License
#
# Copyright (c) 2018 Charles Jekel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# =============================================================================
import numpy as np
import math
# Define a function that determines the circle of best fit for a two
# dimensional data set. Simply supply the X and Y data vectors of the data
# set, and the function returns the x and y center points of the circle, as
# well as the radius of the circle.
# Usage: xCenter, yCenter, r = circleFit(X,Y)
def circleFit(X,Y):
# convert the input vectors to numpy arrays
X = np.array(X)
Y = np.array(Y)
# assemble the A matrix
A = np.zeros((len(X),3))
A[:,0] = X*2
A[:,1] = Y*2
A[:,2] = 1
# assemble the f matrix
f = np.zeros((len(X),1))
f[:,0] = (X*X) + (Y*Y)
    C, residuals, rank, singval = np.linalg.lstsq(A, f)
# solve for r
r = math.sqrt((C[0]*C[0])+(C[1]*C[1])+C[2])
    return C[0], C[1], r
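# A minimal usage sketch (not part of the original module): fit the circle to
# noisy synthetic points. The centre, radius and noise level below are
# illustrative assumptions only.
if __name__ == '__main__':
    theta = np.linspace(0.0, 2.0 * np.pi, 50)
    X = 3.0 + 2.0 * np.cos(theta) + np.random.normal(0.0, 0.01, theta.size)
    Y = -1.0 + 2.0 * np.sin(theta) + np.random.normal(0.0, 0.01, theta.size)
    xc, yc, r = circleFit(X, Y)
    print("center=(%.3f, %.3f) radius=%.3f" % (float(xc), float(yc), r))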
|
python
|
# Script to get the system's info
from __future__ import print_function
import platform
import multiprocessing as mp
'''
Functions:
------------------------------------------------------
sysinfo(display = True, return_info = False)
Gets the details about the system
------------------------------------------------------
'''
# Function to get the system's information
def sysinfo(display = True, return_info = False):
# ----------------------------------------------------
# INPUT:
# ----------------------------------------------------
# display : bool : condition to print details
# return_info : bool : condition to return details
# ----------------------------------------------------
# OUTPUT:
# ----------------------------------------------------
# info : dict : stores the information of the system
# ----------------------------------------------------
# Dictionary storing the information
info = {
'python_version' : platform.python_version(),
'compiler' : platform.python_compiler(),
'os' : platform.system(),
        'version' : platform.release(),
'machine' : platform.machine(),
'processor' : platform.processor(),
'cores' : mp.cpu_count(),
'interpreter' : platform.architecture()[0]
}
# Displays the system details
if display:
print('> Python version :', info['python_version'])
print('> Compiler :', info['compiler'])
print('> Operating System :', info['os'])
        print('> Version :', info['version'])
print('> Machine :', info['machine'])
print('> Processor :', info['processor'])
print('> CPU count :', info['cores'])
print('> Interpreter :', info['interpreter'])
# Returns the system info
if return_info:
return info
if __name__ == '__main__':
sysinfo()
|
python
|
import LevelBuilder
from sprites import *
def render(name,bg):
lb = LevelBuilder.LevelBuilder(name+".plist",background=bg)
lb.addObject(Hero.HeroSprite(x=42, y=245,width=32,height=32))
lb.addObject(Beam.BeamSprite(x=42, y=287,width=17,height=14,angle='0',restitution=0.2,static='true',friction=0.5,density=20 ).setName('HeroHook'))
lb.addObject(Joints.DistanceJoint(body1='Hero',body2='HeroHook',damping='0.2',freq='5' ))
lb.addObject(Friend.FriendSprite(classname = 'AccelFriendSprite', x=460, y=16,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ).setName('Friend'))
lb.addObject(Star.StarSprite(x=364, y=264,width=32,height=32))
lb.addObject(Beam.BeamSprite(x=239, y=239,width=282,height=14,angle='0',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=306, y=99,width=282,height=14,angle='14',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=252, y=-1,width=282,height=14,angle='-10',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=271, y=169,width=229,height=14,angle='0',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=394, y=172,width=33,height=14,angle='15',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=239, y=293,width=282,height=14,angle='2',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Enemy.EnemySprite(x=375, y=310,width=8,height=8,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
lb.addObject(Beam.BeamSprite(x=-21, y=12,width=282,height=14,angle='5',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.render()
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
class UnsupportedPythonError(Exception):
pass
__minimum_python_version__ = "3.6"
if sys.version_info < tuple((int(val) for val in __minimum_python_version__.split('.'))):
raise UnsupportedPythonError(f"pyvib does not support Python < {__minimum_python_version__}")
__version__ = "0.2.dev1"
# this indicates whether or not we are in the package's setup.py
# see https://github.com/astropy/astropy/blob/master/astropy/__init__.py#L63
try:
_PYVIB_SETUP_
except NameError:
import builtins
builtins._PYVIB_SETUP_ = False
if not _PYVIB_SETUP_:
pass
#from pyvib.utils.config import load_config, print_config
#from pyvib.utils.sysinfo import system_info
# Load user configuration
#config = load_config()
#__all__ = ['config', 'system_info']
|
python
|
# Generated by Django 2.1.5 on 2019-02-07 23:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hyperion', '0003_userprofile_url'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='github',
field=models.URLField(blank=True),
),
]
|
python
|
import random
from evennia import TICKER_HANDLER
from evennia import CmdSet, Command, DefaultRoom
from evennia import utils, create_object, search_object
from evennia import syscmdkeys, default_cmds
from evennia.contrib.text_sims.objects import LightSource, TutorialObject
# the system error-handling module is defined in the settings. We load the
# given setting here using utils.object_from_module. This way we can use
# it regardless of whether we change settings later.
from django.conf import settings
_SEARCH_AT_RESULT = utils.object_from_module(settings.SEARCH_AT_RESULT)
# for the @detail command we inherit from MuxCommand, since
# we want to make use of MuxCommand's pre-parsing of '=' in the
# argument.
class CmdTutorialSetDetail(default_cmds.MuxCommand):
"""
sets a detail on a room
Usage:
@detail <key> = <description>
@detail <key>;<alias>;... = description
Example:
@detail walls = The walls are covered in ...
@detail castle;ruin;tower = The distant ruin ...
This sets a "detail" on the object this command is defined on
(TutorialRoom for this tutorial). This detail can be accessed with
the TutorialRoomLook command sitting on TutorialRoom objects (details
are set as a simple dictionary on the room). This is a Builder command.
We custom parse the key for the ;-separator in order to create
multiple aliases to the detail all at once.
"""
key = "@detail"
locks = "cmd:perm(Builders)"
help_category = "TutorialWorld"
def func(self):
"""
All this does is to check if the object has
the set_detail method and uses it.
"""
if not self.args or not self.rhs:
self.caller.msg("Usage: @detail key = description")
return
if not hasattr(self.obj, "set_detail"):
self.caller.msg("Details cannot be set on %s." % self.obj)
return
for key in self.lhs.split(";"):
# loop over all aliases, if any (if not, this will just be
# the one key to loop over)
self.obj.set_detail(key, self.rhs)
self.caller.msg("Detail set: '%s': '%s'" % (self.lhs, self.rhs))
class TutorialRoomCmdSet(CmdSet):
"""
Implements the simple tutorial cmdset. This will overload the look
command in the default CharacterCmdSet since it has a higher
    priority (CharacterCmdSet has priority 0)
"""
key = "tutorial_cmdset"
priority = 1
def at_cmdset_creation(self):
"add the tutorial-room commands"
self.add(CmdTutorialSetDetail())
##### All rooms are derived from this ######
class TutorialRoom(DefaultRoom):
def at_object_creation(self):
"Called when room is first created"
self.db.desc = "Hello from the Baseroom."
self.cmdset.add_default(TutorialRoomCmdSet)
def at_object_receive(self, new_arrival, source_location):
"""
        When an object enters a tutorial room we tell other objects in
the room about it by trying to call a hook on them. The Mob object
uses this to cheaply get notified of enemies without having
to constantly scan for them.
Args:
new_arrival (Object): the object that just entered this room.
source_location (Object): the previous location of new_arrival.
"""
if new_arrival.has_player and not new_arrival.is_superuser:
# this is a character
for obj in self.contents_get(exclude=new_arrival):
if hasattr(obj, "at_new_arrival"):
obj.at_new_arrival(new_arrival)
def return_detail(self, detailkey):
"""
This looks for an Attribute "obj_details" and possibly
returns the value of it.
Args:
detailkey (str): The detail being looked at. This is
case-insensitive.
"""
details = self.db.details
if details:
return details.get(detailkey.lower(), None)
def set_detail(self, detailkey, description):
"""
This sets a new detail, using an Attribute "details".
Args:
detailkey (str): The detail identifier to add (for
aliases you need to add multiple keys to the
same description). Case-insensitive.
description (str): The text to return when looking
at the given detailkey.
"""
if not self.db.details:
self.db.details = {}
# to keep the descriptions of the room separate from object details
if detailkey.startswith('desc'):
if 'desc' not in self.db.details:
self.db.details['desc'] = {}
self.db.details['desc'][detailkey.lower()] = description
else:
self.db.details[detailkey.lower()] = description
def return_appearance(self, looker):
"""
This formats a description. It is the hook a 'look' command
should call.
Args:
looker (Object): Object doing the looking.
"""
if not looker:
return
# # get and identify all objects
# visible = (con for con in self.contents if con != looker and
# con.access(looker, "view"))
# exits, users, things = [], [], []
# for con in visible:
# key = con.key
# if con.destination:
# exits.append(key)
# elif con.has_player:
# users.append("{c%s{n" % key)
# else:
# things.append(key)
# get description, build string
string = "{c%s{n\n" % self.key
        if self.db.details and 'desc' in self.db.details:
            randIndex = random.randint(0, len(self.db.details['desc'])-1)
            # list() so indexing also works on Python 3, where dict.values()
            # returns a view that cannot be indexed directly
            desc = list(self.db.details['desc'].values())[randIndex]
else:
desc = self.db.desc
return desc
|
python
|
#coding=utf8
import itchat
import re
import sys
from itchat.content import *
global author
global isDisturbOn
global visit
global username
def messageProccess(msg):
if not msg["FromUserName"] == author:
if msg["Type"] == "Text":
if re.search("#sinon", msg["Text"]) == None:
if isDisturbOn:
donotDisturb(msg["FromUserName"])
else:
notice(msg["NickName"])
else:
sinonService(msg["FromUserName"], msg["Text"])
else:
if isDisturbOn:
donotDisturb(msg["FromUserName"])
else:
notice(msg["NickName"])
def notice(name):
print "%s send a message to you." % name
def remoteControl(text):
    # isDisturbOn is reassigned below, so it must be declared global here
    global isDisturbOn
if text == "-h":
itchat.send("-s Stop auto reply\n-d Turn on the Don't disturb\n -f Turn off the Don't disturb", "filehelper")
elif text == "-s":
itchat.logout()
elif text == "-d":
isDisturbOn = True
itchat.send("Turn on the Don't disturb", "filehelper")
elif text == "-f":
isDisturbOn = False
itchat.send("Turn off the Don't disturb", "filehelper")
def sinonService(name, text):
itchat.send("woops!There is no function can be used yet~", name)
def donotDisturb(name):
if not visit.get(name, False):
visit[name] = True
itchat.send("@img@%s" % "sinon.jpg", name)
itchat.send("Sorry!%s can't reply you immediately!\nI'm auto-reply bot called sinon. Send #sinon and chat with me!(If you are in a group please @Fa1sePRoMiSe first.)\nsinon's github:https://github.com/NeilKleistGao/SinonChatBot\nWechatAPI github:https://github.com/littlecodersh/itchat\nImage is from Pixiv:https://www.pixiv.net/member_illust.php?mode=medium&illust_id=66989215" % username, name)
@itchat.msg_register([TEXT, MAP, CARD, SHARING, PICTURE, RECORDING, ATTACHMENT, VIDEO], isGroupChat = False)
def autoRecieve(msg):
if msg["ToUserName"] == "filehelper" and msg["Type"] == "Text":
remoteControl(msg["Text"])
else:
messageProccess(msg)
@itchat.msg_register([TEXT, MAP, CARD, SHARING, PICTURE, RECORDING, ATTACHMENT, VIDEO], isGroupChat = True)
def autoRecieveInGroup(msg):
if msg["isAt"]:
messageProccess(msg)
visit = {}
username = "Fa1sePRoMiSe"
isDisturbOn = True
if len(sys.argv) == 2:
username = sys.argv[1]
elif len(sys.argv) == 3:
username = sys.argv[1]
isDisturbOn = sys.argv[2]
itchat.auto_login(hotReload=True)
author = itchat.search_friends(nickName=username)[0]["UserName"]
itchat.send("You have started Sinon on your PC.Please enter -h for help.", "filehelper")
itchat.run()
|
python
|
import os
from setuptools import setup
from gitissius import gitissius
setup(
name = "Gitissius",
version = gitissius.VERSION,
author = "Giorgos Logiotatidis",
author_email = "[email protected]",
description = "Distributed bug tracking for Git.",
license = "Mixed",
keywords = "bug, tracking, git, distributed",
url="http://github.com/glogiotatidis/gitissius",
packages=['gitissius', 'gitissius.commands'],
classifiers = [
"Topic :: Software Development :: Bug Tracking",
"Development Status :: 4 - Beta",
"License :: Freely Distributable",
"License :: OSI Approved :: GNU General Public License (GPL)"
],
entry_points = {
'console_scripts': ['git-issius = gitissius.gitissius:main']
},
data_files = [
('gitissius', ['README.org', 'LICENSE']),
]
)
|
python
|
"""
Plot ENSO from 2015-2018 using daily oisstv2
Date : 18 March 2018
Author : Zachary M. Labe
"""
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import datetime
import cmocean
### Directory and time
directoryfigure = './Figures/'
directorydata = './Data/'
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
currentdoy = now.timetuple().tm_yday
data = Dataset(directorydata + 'sst.day.anom.2015.v2.nc')
sst15 = data.variables['anom'][180:]
data.close()
data = Dataset(directorydata + 'sst.day.anom.2016.nc')
sst16 = data.variables['anom'][:]
data.close()
data = Dataset(directorydata + 'sst.day.anom.2017.nc')
sst17 = data.variables['anom'][:]
data.close()
data = Dataset(directorydata + 'sst.day.anom.2018.nc')
sst18 = data.variables['anom'][:]
lat = data.variables['lat'][:]
lon = data.variables['lon'][:]
data.close()
lon2,lat2 = np.meshgrid(lon,lat)
sstnn = np.append(sst15,sst16,axis=0)
sstn = np.append(sstnn,sst17,axis=0)
sst = np.append(sstn,sst18,axis=0)
def groupedAvg(myArray, N):
result = np.cumsum(myArray, 0)[N-1::N]/float(N)
result[1:] = result[1:] - result[:-1]
return result
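# illustrative check (not part of the analysis): groupedAvg(np.arange(20).reshape(20, 1), 10)
# returns the two 10-sample means [[4.5], [14.5]]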
smooth = groupedAvg(sst,10) # 10-day mean
year15 = np.repeat(np.array([2015]),18)
year16 = np.repeat(np.array([2016]),36)
year17 = np.repeat(np.array([2017]),36)
year18 = np.repeat(np.array([2018]),9)
year1 = np.append(year15,year16)
year2 = np.append(year1,year17)
years = np.append(year2,year18)
###########################################################################
###########################################################################
###########################################################################
### Create plot
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='dimgrey')
plt.rc('xtick',color='dimgrey')
plt.rc('ytick',color='dimgrey')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
def plot_rec(bmap, lower_left, upper_left, lower_right, upper_right):
xs = [lower_left[0], upper_left[0],
upper_right[0],lower_right[0],
lower_left[0]]
ys = [lower_left[1], upper_left[1],
upper_right[1],lower_right[1],
lower_left[1]]
if np.nanmax(xs)==240:
l=3.3
aa = 0.8
else:
l=1.4
aa = 0.6
bmap.plot(xs, ys, latlon = True,color='k',alpha=aa,linewidth=l)
llcrnrlon = 270
urcrnrlon = 280
llcrnrlat = -10
urcrnrlat = 0
lower_left1 = (llcrnrlon, llcrnrlat)
lower_right1= (urcrnrlon, llcrnrlat)
upper_left1 = (llcrnrlon, urcrnrlat)
upper_right1= (urcrnrlon, urcrnrlat)
llcrnrlon = 210
urcrnrlon = 270
llcrnrlat = -5
urcrnrlat = 5
lower_left2 = (llcrnrlon, llcrnrlat)
lower_right2= (urcrnrlon, llcrnrlat)
upper_left2 = (llcrnrlon, urcrnrlat)
upper_right2= (urcrnrlon, urcrnrlat)
llcrnrlon = 190
urcrnrlon = 240
llcrnrlat = -5
urcrnrlat = 5
lower_left3 = (llcrnrlon, llcrnrlat)
lower_right3= (urcrnrlon, llcrnrlat)
upper_left3 = (llcrnrlon, urcrnrlat)
upper_right3= (urcrnrlon, urcrnrlat)
llcrnrlon = 160
urcrnrlon = 210
llcrnrlat = -5
urcrnrlat = 5
lower_left4 = (llcrnrlon, llcrnrlat)
lower_right4= (urcrnrlon, llcrnrlat)
upper_left4 = (llcrnrlon, urcrnrlat)
upper_right4= (urcrnrlon, urcrnrlat)
barlim=np.arange(-3,4,3)
for i in range(smooth.shape[0]):
fig = plt.figure(figsize=(9,5))
ax = plt.subplot(111)
m = Basemap(projection='merc',llcrnrlat=-17,urcrnrlat=17,\
llcrnrlon=180,urcrnrlon=290,resolution='l')
m.drawcoastlines()
m.fillcontinents(color='k',lake_color='k')
m.drawmapboundary(fill_color='k')
cs=m.contourf(lon2,lat2,smooth[i],np.arange(-3,3.02,0.1),latlon=True,
extend='both')
cs.set_cmap(cmocean.cm.balance)
cbar = plt.colorbar(cs,drawedges=False,orientation='horizontal',
pad = 0.04,fraction=0.047,extend='both')
cbar.set_ticks(barlim)
cbar.set_ticklabels(list(map(str,barlim)))
cbar.ax.tick_params(axis='x', size=.001)
cbar.ax.tick_params(labelsize=13)
cbar.set_label(r'\textbf{%s}' % years[i],color='darkgrey',
fontsize=30)
### Draw ENSO boxes
plot_rec(m, lower_left1, upper_left1, lower_right1, upper_right1)
plot_rec(m, lower_left2, upper_left2, lower_right2, upper_right2)
plot_rec(m, lower_left3, upper_left3, lower_right3, upper_right3)
plot_rec(m, lower_left4, upper_left4, lower_right4, upper_right4)
plt.title(r'\textbf{SEA SURFACE TEMPERATURE ANOMALIES}',color='darkgrey',
fontsize=30)
plt.annotate(r'\textbf{DATA}: NOAA OISSTv2 [\textbf{BASE: 1971-2000}]',
textcoords='axes fraction',xy=(0,0),xytext=(0.76,-0.08),
fontsize=5,color='darkgrey',ha='left',va='center')
plt.annotate(r'\textbf{SOURCE}: http://www.esrl.noaa.gov/psd/',
textcoords='axes fraction',xy=(0,0),xytext=(0.76,-0.11),
fontsize=5,color='darkgrey',ha='left',va='center')
plt.annotate(r'\textbf{GRAPHIC}: Zachary Labe (@ZLabe)',
textcoords='axes fraction',xy=(0,0),xytext=(0.76,-0.14),
fontsize=5,color='darkgrey',ha='left',va='center')
plt.annotate(r'\textbf{$^\circ$C}',
textcoords='axes fraction',xy=(0,0),xytext=(0.2,-0.11),
fontsize=20,color='darkgrey',ha='left',va='center')
plt.subplots_adjust(bottom=0.2)
if i < 10:
plt.savefig(directoryfigure + 'sstq_00%s.png' % (i),
dpi=170)
    # skip the final frame (i == 98) here so it falls through to the block
    # below, which saves duplicate copies of the last frame for the animation
    elif i < 100 and i != 98:
plt.savefig(directoryfigure + 'sstq_0%s.png' % (i),
dpi=170)
elif i==98:
plt.savefig(directoryfigure + 'sstq_991.png',
dpi=170)
plt.savefig(directoryfigure + 'sstq_992.png',
dpi=170)
plt.savefig(directoryfigure + 'sstq_993.png',
dpi=170)
plt.savefig(directoryfigure + 'sstq_994.png',
dpi=170)
plt.savefig(directoryfigure + 'sstq_995.png',
dpi=170)
plt.savefig(directoryfigure + 'sstq_996.png',
dpi=170)
plt.savefig(directoryfigure + 'sstq_997.png',
dpi=170)
plt.savefig(directoryfigure + 'sstq_998.png',
dpi=170)
plt.savefig(directoryfigure + 'sstq_999.png',
dpi=170)
else:
plt.savefig(directoryfigure + 'sstq_%s.png' % (i),
dpi=170)
|
python
|
# -*- coding: utf-8 -*-
"""
Guney's network proposed in A Deep Neural Network for SSVEP-based Brain-Computer Interfaces.
Modified from https://github.com/osmanberke/Deep-SSVEP-BCI.git
"""
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base import compute_same_pad2d, _narrow_normal_weight_zero_bias, compute_out_size, SkorchNet
@SkorchNet
class GuneyNet(nn.Module):
"""
Guney's network for decoding SSVEP.
They used two stages to train the network.
The first stage is with all training data in the dataset.
lr: 1e-4, batch_size: 100, l2_regularization: 1e-3, epochs: 1000
The second stage is a fine-tuning process with each subject's training data.
lr: 1e-4, batch_size: full size, l2_regularization: 1e-3, epochs: 1000
spatial_dropout=time1_dropout=0.6
"""
def __init__(self, n_channels, n_samples, n_classes, n_bands,
n_spatial_filters=120, spatial_dropout=0.1,
time1_kernel=2, time1_stride=2, n_time1_filters=120,
time1_dropout=0.1,
time2_kernel=10, n_time2_filters=120,
time2_dropout=0.95):
# super(GuneyNet, self).__init__()
super().__init__()
self.n_channels = n_channels
self.n_samples = n_samples
self.n_classes = n_classes
self.n_bands = n_bands
self.model = nn.Sequential(OrderedDict([
('band_layer', nn.Conv2d(n_bands, 1, (1, 1), bias=False)),
('spatial_layer', nn.Conv2d(1, n_spatial_filters, (n_channels, 1))),
('spatial_dropout', nn.Dropout(spatial_dropout)),
('time1_layer',
nn.Conv2d(n_spatial_filters, n_time1_filters, (1, time1_kernel),
stride=(1, time1_stride))),
('time1_dropout', nn.Dropout(time1_dropout)),
('relu', nn.ReLU()),
('same_padding',
nn.ConstantPad2d(
compute_same_pad2d(
(1, compute_out_size(n_samples, time1_kernel, stride=time1_stride)),
(1, time2_kernel),
stride=(1, 1)),
0)),
('time2_layer',
nn.Conv2d(n_time1_filters, n_time2_filters, (1, time2_kernel),
stride=(1, 1))),
('time2_dropout', nn.Dropout(time2_dropout)),
('flatten', nn.Flatten()),
('fc_layer', nn.Linear(
n_time2_filters*compute_out_size(n_samples, time1_kernel, stride=time1_stride),
n_classes))
]))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
_narrow_normal_weight_zero_bias(self)
nn.init.ones_(self.model[0].weight)
        # MATLAB uses xavier_uniform_ with variance 2/(input+output);
        # this may be a mistake in the Help document
nn.init.xavier_normal_(self.model[-1].weight, gain=1)
def forward(self, X):
# X: (n_batch, n_bands, n_channels, n_samples)
out = self.model(X)
return out
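# A hypothetical usage sketch only (not part of the original module): the class
# is wrapped by the SkorchNet decorator, so the exact fit/predict interface and
# the argument values below are illustrative assumptions.
#   net = GuneyNet(n_channels=9, n_samples=250, n_classes=40, n_bands=3)
#   net.fit(X_train, y_train)      # X_train: (n_batch, n_bands, n_channels, n_samples)
#   y_pred = net.predict(X_test)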
|
python
|
import numpy as np
import itertools
import torch
class TestAugmentor(object):
"""Test Augmentor.
Args:
mode (str): inference mode ('min', 'max', 'mean').
num_aug (int): number of data augmentations: 4-fold, 16-fold
"""
def __init__(self, mode='min', num_aug=4):
self.mode = mode
self.num_aug = num_aug
assert num_aug in [4, 16], "TestAugmentor.num_aug should be either 4 or 16!"
def __call__(self, model, data):
out = None
cc = 0
if self.num_aug == 4:
opts = itertools.product((False, ), (False, ), (False, True), (False, True))
else:
opts = itertools.product((False, True), (False, True), (False, True), (False, True))
for xflip, yflip, zflip, transpose in opts:
volume = data.clone()
# b,c,z,y,x
if xflip:
volume = torch.flip(volume, [4])
if yflip:
volume = torch.flip(volume, [3])
if zflip:
volume = torch.flip(volume, [2])
if transpose:
volume = torch.transpose(volume, 3, 4)
# aff: 3*z*y*x
vout = model(volume).detach().cpu()
if transpose: # swap x-/y-affinity
vout = torch.transpose(vout, 3, 4)
if zflip:
vout = torch.flip(vout, [2])
if yflip:
vout = torch.flip(vout, [3])
if xflip:
vout = torch.flip(vout, [4])
# cast to numpy array
vout = vout.numpy()
if out is None:
if self.mode == 'min':
out = np.ones(vout.shape, dtype=np.float32)
elif self.mode == 'max':
out = np.zeros(vout.shape, dtype=np.float32)
elif self.mode == 'mean':
out = np.zeros(vout.shape, dtype=np.float32)
if self.mode == 'min':
out = np.minimum(out, vout)
elif self.mode == 'max':
out = np.maximum(out, vout)
elif self.mode == 'mean':
out += vout
cc+=1
if self.mode == 'mean':
out = out/cc
return out
def update_name(self, name):
extension = "_"
if self.num_aug == 4:
extension += "tz"
else:
extension += "tzyx"
# Update the suffix of the output filename to indicate
# the use of test-time data augmentation.
name_list = name.split('.')
new_filename = name_list[0] + extension + "." + name_list[1]
return new_filename
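# A hypothetical usage sketch (the model and volume names are illustrative
# assumptions; volume is a (b, c, z, y, x) torch tensor):
#   augmentor = TestAugmentor(mode='mean', num_aug=4)
#   prediction = augmentor(model, volume)
#   output_name = augmentor.update_name('result.h5')   # -> 'result_tz.h5'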
|
python
|
"""
Cciss - Files ``/proc/driver/cciss/cciss*``
===========================================
Reads the ``/proc/driver/cciss/cciss*`` files and converts them into a
dictionary in the *data* property.
Example:
>>> cciss = shared[Cciss]
>>> cciss.data['Logical drives']
'1'
>>> 'IRQ' in cciss.data
True
>>> cciss.model
'HP Smart Array P220i Controller'
>>> cciss.firmware_version
'3.42'
"""
from .. import Parser, parser
from insights.specs import Specs
@parser(Specs.cciss)
class Cciss(Parser):
'''
Class for parsing the content of ``/etc/device/cciss*``
Raw Data::
cciss0: HP Smart Array P220i Controller
Board ID: 0x3355103c
Firmware Version: 3.42
IRQ: 82
Logical drives: 1
Sector size: 8192
Current Q depth: 0
Current # commands on controller: 0
Max Q depth since init: 84
Max # commands on controller since init: 111
Max SG entries since init: 128
Sequential access devices: 0
cciss/c0d0: 299.96GB RAID 1(1+0)
Output::
data = {
"Sequential access devices": "0",
"Current Q depth": "0",
"cciss0": "HP Smart Array P220i Controller",
"Board ID": "0x3355103c",
"IRQ": "82",
"cciss/c0d0": "299.96GB RAID 1(1+0)",
"Logical drives": "1",
"Current # commands on controller": "0",
"Sector size": "8192",
"Firmware Version": "3.42",
"Max # commands on controller since init": "111",
"Max SG entries since init": "128",
"Max Q depth since init": "84"
}
'''
def parse_content(self, content):
self.device = self.file_name
self.data = {}
for line in content:
if line.strip():
key, val = line.split(":", 1)
self.data[key.strip()] = val.strip()
@property
def firmware_version(self):
'''Return the Firmware Version.'''
return self.data.get('Firmware Version')
@property
def model(self):
'''Return the full model name of the cciss device.'''
return self.data.get(self.device)
|
python
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
import logging
import os
import errno
from .BaseDispatch import BaseDispatch
try:
import matplotlib.pyplot as plt
import numpy as np
IMPORTED = True
except Exception as e:
logging.warning('Failed to import numpy or matplotlib. Are you sure they are properly installed?')
logging.warning('You can ignore this warning if you do not plan to use Matplotlib')
logging.warning(e)
IMPORTED = False
class MatplotlibDispatch(BaseDispatch):
"""Display events via Matplotlib backend. This class requires some heavy dependencies, and so
trying to run it without Matplotlib and Numpy installed will result in pass-thru behavior
Arguments
---------
task_params : dict
Dictionary of the task json specification, including name and ID number
img_folder : string
Folder to save output images to
"""
def __init__(self, task_params, img_folder):
super().__init__()
# Data will be a dictionary of lists
self._data = {}
self.task_params = task_params
self._img_folder = img_folder
self._legend_keys = []
def setup_display(self, time_axis, attributes, show_windows=False):
if IMPORTED:
super().setup_display(time_axis, attributes)
# Setup data
for item in self._attributes:
if item != self._time_axis:
self._data[item] = []
# Setup plotting
plt.figure(figsize=(12, 10))
if show_windows:
plt.ion()
plt.show()
else:
logging.error('You need Matplotlib and Numpy to run the MatplotlibDispatch, please install them')
def train_event(self, event):
"""Plot a basic training and testing curve via Matplotlib
Arguments
---------
event : TrainingEvent.TrainingEvent
Event to add to Matplotlib plot
"""
if IMPORTED:
super().train_event(event)
time = event.attributes[event.time_axis]
for item in event.attributes:
if item != event.time_axis:
val = event.attributes[item]
self._data[item].append([time, val])
# Convert to numpy arrays
np_data = []
mins = {}
maxes = {}
for key in self._data:
if self._data[key]:
data = np.array(self._data[key])
mins[key] = np.min(data, axis=0)[1]
maxes[key] = np.max(data, axis=0)[1]
np_data.append(data[:, 0])
np_data.append(data[:, 1])
plt.clf()
plt.plot(*np_data)
self._legend_keys = []
for k in self._data.keys():
text = "{} (".format(k.title())
if k in maxes:
text += "Max: {:0.4f} ".format(float(maxes[k]))
if k in mins:
text += "Min: {:0.4f}".format(float(mins[k]))
text += ")"
self._legend_keys.append(text)
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height*0.9])
plt.legend(self._legend_keys,
bbox_to_anchor=(0.5, -0.05),
loc='upper center',
ncol=2,
borderaxespad=0.)
plt.title(self.task_params['title'])
plt.grid(True, which='both')
plt.draw()
else:
logging.error('Improper requirements, skipping train event')
def train_finish(self):
"""Save our output figure to PNG format, as defined by the save path `img_folder`"""
if IMPORTED:
filename = self.task_params['title'].replace(' ', '_')
save_path = os.path.join(self._img_folder, filename)
logging.info('Finished training! Saving output image to {0}'.format(save_path))
logging.info('\'{}\' Final Extremes: {}'.format(self.task_params['title'], self._legend_keys))
try:
                # create the configured output folder (not just its basename),
                # since save_path above is joined against self._img_folder
                fold = self._img_folder
                logging.info("Creating folder {}".format(fold))
                os.makedirs(fold)
except OSError as e:
if e.errno != errno.EEXIST:
raise
plt.savefig(save_path, bbox_inches='tight', format='png')
plt.close()
else:
logging.error('Improper requirements, skipping train finish')
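# A hypothetical usage sketch (the task parameters, attribute names and folder
# are illustrative assumptions; events come from the surrounding training loop):
#   dispatch = MatplotlibDispatch({'title': 'My Task'}, './img')
#   dispatch.setup_display('epoch', ['epoch', 'loss', 'accuracy'])
#   # for each TrainingEvent produced during training:
#   #     dispatch.train_event(event)
#   dispatch.train_finish()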
|
python
|
from nintendo import account
api = account.AccountAPI()
pid = api.get_pid("Kinnay-WiiU")
mii = api.get_mii(pid)
print("NNID:", mii.nnid)
print("PID:", pid) #Same as mii.pid
print("Name:", mii.name)
info = mii.data
print("Mii:")
print("\tBirthday: %i-%i" %(info.birth_day, info.birth_month))
print("\tCreator name:", info.creator_name)
print("\tMii color:", info.color)
print("\tMii size: %i%%" %(info.size / 128 * 100))
print("\tMii weight: %i%%" %(info.fatness / 128 * 100))
print("\tGender:", ["Male", "Female"][info.gender])
print("\t----------")
print("\tBlush style:", info.blush_type)
print("\tFace style:", info.face_style)
print("\tFace color:", info.face_color)
print("\tFace shape:", info.face_type)
print("\t----------")
print("\tHair style:", info.hair_type)
print("\tHair color:", info.hair_color)
print("\tHair mirrored:", ["No", "Yes"][info.hair_mirrored])
print("\t----------")
print("\tEye style:", info.eye_type)
print("\tEye color:", info.eye_color)
print("\tEye size:", info.eye_scale)
print("\tEye thickness:", info.eye_thickness)
print("\tEye height:", info.eye_height)
print("\tEye distance:", info.eye_distance)
print("\tEye rotation:", info.eye_rotation)
print("\t----------")
print("\tEyebrow style:", info.eyebrow_type)
print("\tEyebrow color:", info.eyebrow_color)
print("\tEyebrow size:", info.eyebrow_scale)
print("\tEyebrow thickness:", info.eyebrow_thickness)
print("\tEyebrow height:", info.eyebrow_height)
print("\tEyebrow distance:", info.eyebrow_distance)
print("\tEyebrow rotation:", info.eyebrow_rotation)
print("\t----------")
print("\tNose style:", info.nose_type)
print("\tNose size:", info.nose_scale)
print("\tNose height:", info.nose_height)
print("\t----------")
print("\tMouth style:", info.mouth_type)
print("\tMouth color:", info.mouth_color)
print("\tMouth size:", info.mouth_scale)
print("\tMouth thickness:", info.mouth_thickness)
print("\tMouth height:", info.mouth_height)
print("\t----------")
print("\tMustache style:", info.mustache_type)
if info.mustache_type:
print("\tMustache height:", info.mustache_height)
print("\tMustache size:", info.mustache_scale)
print("\tBeard style:", info.beard_type)
if info.beard_type:
print("\tBeard color:", info.beard_color)
print("\t----------")
print("\tGlasses style:", info.glass_type)
if info.glass_type:
print("\tGlasses color:", info.glass_color)
print("\tGlasses size:", info.glass_scale)
print("\tGlasses height:", info.glass_height)
print("\t----------")
print("\tMole:", ["No", "Yes"][info.mole_enabled])
if info.mole_enabled:
print("\tMole size:", info.mole_scale)
print("\tMole X:", info.mole_xpos)
print("\tMole Y:", info.mole_ypos)
print("Images:")
for url in mii.images.values():
print("\t%s" %url)
|
python
|
from unittest.mock import patch
from pytest import fixture
@fixture(autouse=True)
def environ():
environ = {
"slack_access_token": "slack-access-token",
"slack_signing_secret": "slack-signing-secret",
}
with patch.dict("os.environ", environ, clear=True):
yield environ
@fixture
def settings(environ):
from fastapi_slack import Settings
return Settings()
|
python
|
import cv2
import numpy as np
from operator import floordiv
vc = cv2.VideoCapture(0)
cv2.namedWindow("top")
cv2.namedWindow("bottom")
if vc.isOpened():
rval, frame = vc.read()
height = len(frame)
width = len(frame[0])
else:
rval = False
lights = (16,9)
history = 5
top = np.zeros((lights[0], 1, 3), dtype = "uint8")
bottom = np.zeros((lights[0], 1, 3), dtype = "uint8")
left = np.zeros((lights[1]-2, 1, 3), dtype = "uint8")
right = np.zeros((lights[1]-2, 1, 3), dtype = "uint8")
topHistory = np.zeros((lights[0], history, 3), dtype = "uint8")
bottomHistory = np.zeros((lights[0], history, 3), dtype = "uint8")
leftHistory = np.zeros((lights[1]-2, history, 3), dtype = "uint8")
rightHistory = np.zeros((lights[1]-2, history, 3), dtype = "uint8")
currIndex = 0
prevIndex = history - 1
while rval:
rval, frame = vc.read()
smaller = cv2.resize(frame, lights)
for x in range(0,lights[0]-1):
top[x][0] = top[x][0] - topHistory[x][currIndex]
bottom[x][0] = bottom[x][0] - bottomHistory[x][currIndex]
topHistory[x][currIndex] = smaller[0][x] / history
bottomHistory[x][currIndex] = smaller[lights[1]-1][x] / history
top[x][0] = top[x][0] + topHistory[x][currIndex]
bottom[x][0] = bottom[x][0] + bottomHistory[x][currIndex]
for y in range(1,lights[1]-2):
left[y][0] = left[y][0] - leftHistory[y][currIndex]
right[y][0] = right[y][0] - rightHistory[y][currIndex]
leftHistory[y][currIndex] = smaller[y][0] / history
rightHistory[y][currIndex] = smaller[y][lights[0]-1] / history
left[y][0] = left[y][0] + leftHistory[y][currIndex]
right[y][0] = right[y][0] + rightHistory[y][currIndex]
prevIndex = currIndex
currIndex = currIndex + 1
if currIndex >= history:
currIndex = 0
topLarger = cv2.resize(cv2.transpose(top), (width, int(height/lights[1])), interpolation=cv2.INTER_AREA)
cv2.imshow("top",topLarger)
bottomLarger = cv2.resize(cv2.transpose(bottom), (width, int(height/lights[1])), interpolation=cv2.INTER_AREA)
cv2.imshow("bottom",bottomLarger)
key = cv2.waitKey(15)
if key == 27: # ESC key
break
cv2.destroyAllWindows()
|
python
|
"""
If you use this code, please cite one of the SynthSeg papers:
https://github.com/BBillot/SynthSeg/blob/master/bibtex.bib
Copyright 2020 Benjamin Billot
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and limitations under the
License.
"""
# python imports
import numpy as np
import tensorflow as tf
import keras.layers as KL
from keras.models import Model
# third-party imports
from ext.lab2im import utils
from ext.lab2im import layers
from ext.lab2im import edit_tensors as l2i_et
def metrics_model(input_model, label_list, metrics='dice'):
# get prediction
last_tensor = input_model.outputs[0]
input_shape = last_tensor.get_shape().as_list()[1:]
# check shapes
n_labels = input_shape[-1]
label_list = np.unique(label_list)
assert n_labels == len(label_list), 'label_list should be as long as the posteriors channels'
# get GT and convert it to probabilistic values
labels_gt = input_model.get_layer('labels_out').output
labels_gt = layers.ConvertLabels(label_list)(labels_gt)
labels_gt = KL.Lambda(lambda x: tf.one_hot(tf.cast(x, dtype='int32'), depth=n_labels, axis=-1))(labels_gt)
labels_gt = KL.Reshape(input_shape)(labels_gt)
# make sure the tensors have the right keras shape
last_tensor._keras_shape = tuple(last_tensor.get_shape().as_list())
labels_gt._keras_shape = tuple(labels_gt.get_shape().as_list())
if metrics == 'dice':
last_tensor = layers.DiceLoss()([labels_gt, last_tensor])
elif metrics == 'wl2':
last_tensor = layers.WeightedL2Loss(target_value=5)([labels_gt, last_tensor])
else:
        raise Exception('metrics should either be "dice" or "wl2", got {}'.format(metrics))
# create the model and return
model = Model(inputs=input_model.inputs, outputs=last_tensor)
return model
class IdentityLoss(object):
"""Very simple loss, as the computation of the loss as been directly implemented in the model."""
def __init__(self, keepdims=True):
self.keepdims = keepdims
def loss(self, y_true, y_predicted):
"""Because the metrics is already calculated in the model, we simply return y_predicted.
We still need to put y_true in the inputs, as it's expected by keras."""
loss = y_predicted
tf.debugging.check_numerics(loss, 'Loss not finite')
return loss
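# A hypothetical usage sketch (the optimizer choice is an illustrative
# assumption; training_model is a model that exposes a 'labels_out' layer):
#   model = metrics_model(training_model, label_list, metrics='dice')
#   model.compile(optimizer='adam', loss=IdentityLoss().loss)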
|
python
|
"""Multiply SOIL13 and SOIL14 by 10 to change units from % to g/kg"""
import pyiem.cscap_utils as util
def main():
"""Go Main Go"""
config = util.get_config()
spr_client = util.get_spreadsheet_client(config)
drive = util.get_driveclient(config)
# Fake last conditional to make it easy to reprocess one site...
res = (
drive.files()
.list(q=("title contains 'Soil Texture Data'"), maxResults=999)
.execute()
)
HEADERS = [
"uniqueid",
"plotid",
"depth",
"tillage",
"rotation",
"soil6",
"nitrogen",
"drainage",
"rep",
"subsample",
"landscape",
"notes",
"herbicide",
"sampledate",
]
sz = len(res["items"])
for i, item in enumerate(res["items"]):
if item["mimeType"] != "application/vnd.google-apps.spreadsheet":
continue
spreadsheet = util.Spreadsheet(spr_client, item["id"])
spreadsheet.get_worksheets()
for year in spreadsheet.worksheets:
print(
'%3i/%3i sheet "%s" for "%s"'
% (i + 1, sz, year, item["title"])
)
lf = spreadsheet.worksheets[year].get_list_feed()
for rownum, entry in enumerate(lf.entry):
dirty = False
data = entry.to_dict()
for key in ["soil13", "soil14"]:
if key not in data:
continue
value = data[key]
if rownum == 1 and value == "%":
print("updating % to g/kg")
entry.set_value(key, "g/kg")
dirty = True
continue
if rownum >= 2:
try:
newvalue = float(value) * 10.0
except Exception:
continue
print("%s updating %s to %s" % (key, value, newvalue))
entry.set_value(key, "%.4f" % (newvalue,))
dirty = True
if dirty:
util.exponential_backoff(spr_client.update, entry)
if __name__ == "__main__":
main()
|
python
|
##!/usr/bin/python
import numpy as np
import pylab as plt
import seaborn as sns
sns.set_context('poster')
plt.subplot(1,1,1)
data = np.genfromtxt(fname='energy.dat')
#data = np.loadtxt('traj.dat')
#for x in range(1,data.shape[-1]):
plt.plot(data[:,0],data[:,1],label='kinetic')
plt.plot(data[:,0],data[:,2],label='potential')
plt.plot(data[:,0],data[:,3],label='quantum potential')
plt.plot(data[:,0],data[:,4],label='total')
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
plt.xlabel('time')
plt.ylabel('$x_i$')
#plt.title('traj')
plt.legend()
plt.show()
|
python
|
import nxutils
import networkx as nx
# TODO: Update this to remove dependency on static zipcode
# if we ever need to use this code
zipCode = '02138'
prefix = 'neighborhood'
n = nxutils.NXUtils(prefix, zipCode)
n.buildNetwork()
DG = n.getNetwork()
fullCycles = nx.simple_cycles(DG)
print('Number of cycles for all stress')
print(len(list(fullCycles)))
MG = n.getStressNetwork(2)
stressCycles = len(list(nx.simple_cycles(MG)))
print('Number of cycles for medium stress')
print(stressCycles)
SG = n.getStressNetwork(1)
stressCycles = len(list(nx.simple_cycles(SG)))
print('Number of cycles for low stress')
print(stressCycles)
|
python
|
# This file was auto generated; Do not modify, if you value your sanity!
import ctypes
try: # 2
from can_settings import can_settings
from swcan_settings import swcan_settings
except:
from ics.structures.can_settings import can_settings
from ics.structures.swcan_settings import swcan_settings
# flags
class flags(ctypes.Structure):
_pack_ = 2
_fields_ = [
('disableUsbCheckOnBoot', ctypes.c_uint32, 1), # [Bitfield]
('enableLatencyTest', ctypes.c_uint32, 1), # [Bitfield]
('reserved', ctypes.c_uint32, 30), # [Bitfield]
]
# Extra names go here:
# End of extra names
class s_vivid_can_settings(ctypes.Structure):
_pack_ = 2
_anonymous_ = ("flags",)
_fields_ = [
('ecu_id', ctypes.c_uint32),
('can1', can_settings),
('swcan1', swcan_settings),
('lsftcan1', can_settings),
('network_enables', ctypes.c_uint16),
('network_enabled_on_boot', ctypes.c_uint16),
('iso15765_separation_time_offset', ctypes.c_uint16),
('perf_en', ctypes.c_uint16),
('pwr_man_timeout', ctypes.c_uint32),
('pwr_man_enable', ctypes.c_uint16),
('can_switch_mode', ctypes.c_uint16),
('termination_enables', ctypes.c_uint16),
('flags', flags),
]
# Extra names go here:
SVividCANSettings = s_vivid_can_settings
# End of extra names
|
python
|
"""Optimization methods."""
|
python
|
# -*- coding: utf-8 -*-
"""Build functions to wrap mlflow models.
Existing implementation returns a synchronous/blocking function. This decision
was taken because applying ML models is probably CPU-bound. Not having
unnecessary asynchronous code also makes testing simpler.
Currently supports only the pyfunc flavour.
Copyright (C) 2022, Auto Trader UK
"""
from typing import Any, Callable, List
import pandas as pd
from mlflow.pyfunc import PyFuncModel # type: ignore
from pydantic import BaseModel
import fastapi_mlflow._mlflow_types as _mlflow_types
def build_predictor(model: PyFuncModel) -> Callable[[List[BaseModel]], Any]:
"""Build and return a function that wraps the mlflow model.
Currently supports only the `pyfunc`_ flavour of mlflow.
:param model: PyFuncModel
:return: Function suitable for mounting as a FastAPI endpoint or route.
Example::
model = load_model("/Users/me/path/to/local/model")
predictor = build_predictor(model)
.. _pyfunc: https://www.mlflow.org/docs/latest/python_api/mlflow.pyfunc.html
"""
request_type: Any = _mlflow_types.build_input_model(
model.metadata.get_input_schema()
)
return_type: Any = _mlflow_types.build_output_model(
model.metadata.get_output_schema()
)
def predictor(request: List[request_type]) -> List[return_type]:
df = pd.DataFrame([row.dict() for row in request], dtype=object)
return [return_type(prediction=row) for row in model.predict(df)]
return predictor
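# A minimal mounting sketch (the model path and route name are illustrative
# assumptions, not part of this module):
#
#   from fastapi import FastAPI
#   from mlflow.pyfunc import load_model
#
#   app = FastAPI()
#   model = load_model("/path/to/model")
#   app.add_api_route("/predictions", build_predictor(model), methods=["POST"])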
|
python
|
# Generated by Django 4.0.2 on 2022-02-04 17:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='products',
old_name='product_type',
new_name='category',
),
]
|
python
|
"""
Copyright 2019 IBM Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
WAW implements a deprecation mechanism using the 'deprecation' package.
To deprecate a function (or any other piece of code), see this example code:
import deprecation
from _version import __version__
@deprecation.deprecated(
deprecated_in='2.2',
removed_in='2.6',
current_version=__version__,
details='Use the function_test.py script / main function with version 2.2 instead (--version 2.2).')
def main(argv):
...
Pytest prints a warning and a list of tests that use deprecated code (__version__ >= deprecated_in).
In case of using unsupported code (__version__ >= removed_in), tests fail.
When tests fail because of unsupported code, the unsupported code has to be removed (it is time to remove it)
and the tests should be removed or updated to a newer, supported version of the code (if one exists)
- but only if such tests do not already exist.
When changing __version__, it is also necessary to change it in README.md (url
https://img.shields.io/badge/WAW-<__version__>-BLUE.svg)
"""
__version__ = '2.2'
|
python
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
if parent_path not in sys.path:
sys.path.append(parent_path)
from paddle import fluid
import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
try:
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.utils.cli import ArgsParser
import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.export_utils import save_infer_model, dump_infer_config
from ppdet.utils.check import check_config, check_version, check_py_func, enable_static_mode
except ImportError as e:
if sys.argv[0].find('static') >= 0:
logger.error("Importing ppdet failed when running static model "
"with error: {}\n"
"please try:\n"
"\t1. run static model under PaddleDetection/static "
"directory\n"
"\t2. run 'pip uninstall ppdet' to uninstall ppdet "
"dynamic version firstly.".format(e))
sys.exit(-1)
else:
raise e
def main():
cfg = load_config(FLAGS.config)
merge_config(FLAGS.opt)
check_config(cfg)
check_version()
main_arch = cfg.architecture
# Use CPU for exporting inference model instead of GPU
place = fluid.CPUPlace()
exe = fluid.Executor(place)
model = create(main_arch)
startup_prog = fluid.Program()
infer_prog = fluid.Program()
with fluid.program_guard(infer_prog, startup_prog):
with fluid.unique_name.guard():
inputs_def = cfg['TestReader']['inputs_def']
inputs_def['use_dataloader'] = False
feed_vars, _ = model.build_inputs(**inputs_def)
            # postprocessing is not needed in exclude_nms mode; NMS is excluded there
test_fetches = model.test(feed_vars, exclude_nms=FLAGS.exclude_nms)
infer_prog = infer_prog.clone(True)
check_py_func(infer_prog)
exe.run(startup_prog)
checkpoint.load_params(exe, infer_prog, cfg.weights)
dump_infer_config(FLAGS, cfg)
save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)
if __name__ == '__main__':
enable_static_mode()
parser = ArgsParser()
parser.add_argument(
"--output_dir",
type=str,
default="output",
help="Directory for storing the output model files.")
parser.add_argument(
"--exclude_nms",
action='store_true',
default=False,
help="Whether prune NMS for benchmark")
FLAGS = parser.parse_args()
main()
|
python
|
import time
from multiprocessing import Queue
from queue import Full
import numpy as np
from .sampler import Sampler
class FakeSampler(Sampler):
def __init__(self,
instance: np.ndarray,
sample_queue: Queue,
num_of_samples: int = 1000):
super(FakeSampler, self).__init__()
self.instance = instance
self.sample_queue = sample_queue
self.num_of_samples = num_of_samples
def run(self):
while True:
if self.sample_queue.full():
time.sleep(0.1)
continue
for _ in range(self.num_of_samples):
try:
self.sample_queue.put((np.ones_like(self.instance), 1),
block=True)
except Full:
break
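# A hypothetical usage sketch (whether the sampler is started as a process or a
# thread depends on the Sampler base class; the shapes below are illustrative):
#   q = Queue(maxsize=10000)
#   sampler = FakeSampler(np.zeros((28, 28)), q, num_of_samples=1000)
#   sampler.start()    # keeps filling q with (np.ones_like(instance), 1) tuples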
|
python
|
# Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup, find_packages
import re
VERSION_PATTERN = re.compile(r"(?m)^__version__\s*=\s*['\"](.+)['\"]$")
def get_version():
"""Return the current version as defined by yubico/yubico_version.py."""
with open('pyhsm/__init__.py', 'r') as f:
match = VERSION_PATTERN.search(f.read())
return match.group(1)
setup(
name='pyhsm',
version=get_version(),
description='Python code for talking to a YubiHSM',
author='Dain Nilsson',
author_email='[email protected]',
url='https://github.com/Yubico/python-pyhsm',
license='BSD 2 clause',
packages=find_packages(exclude=['test']),
entry_points={
'console_scripts': [
# tools
'yhsm-daemon = pyhsm.stick_daemon:main [daemon]',
'yhsm-decrypt-aead = pyhsm.tools.decrypt_aead:main',
'yhsm-generate-keys = pyhsm.tools.generate_keys:main',
'yhsm-keystore-unlock = pyhsm.tools.keystore_unlock:main',
'yhsm-linux-add-entropy = pyhsm.tools.linux_add_entropy:main',
# ksm
'yhsm-yubikey-ksm = pyhsm.ksm.yubikey_ksm:main [db,daemon]',
'yhsm-import-keys = pyhsm.ksm.import_keys:main',
'yhsm-db-export = pyhsm.ksm.db_export:main [db]',
'yhsm-db-import = pyhsm.ksm.db_import:main [db]',
# validation server
'yhsm-validation-server = pyhsm.val.validation_server:main',
'yhsm-validate-otp = pyhsm.val.validate_otp:main',
'yhsm-init-oath-token = pyhsm.val.init_oath_token:main'
]
},
test_suite='test.test_init',
tests_require=[],
install_requires=[
'pyserial >= 2.3',
'pycrypto >= 2.1'
],
extras_require={
'db': ['sqlalchemy'],
'daemon': ['python-daemon']
},
classifiers=[
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
]
)
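# Usage note (a sketch, assuming a standard pip toolchain): the optional
# features declared in extras_require above can be pulled in explicitly, e.g.
#     pip install .               # core package only
#     pip install ".[db,daemon]"  # adds sqlalchemy and python-daemon support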
|
python
|
import os.path as P
def relative_path(this_file, rel_path):
    return P.join(P.dirname(this_file), rel_path)
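# Example: build a path to a file that sits next to the calling module. The
# file name below is illustrative only; nothing needs to exist on disk, since
# this only joins path strings.
if __name__ == '__main__':
    print(relative_path(__file__, 'data/config.yaml'))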
|
python
|
import pytest
from qualipy.filters import *
PATTERN = 'tests/images/pattern.jpg'
NON_PATTERN = 'tests/images/lama.jpg'
def test_recognizes_pattern():
assert Pattern().predict(PATTERN)
def test_doesnt_recognize_normal_image():
assert not Pattern().predict(NON_PATTERN)
def test_setting_threshold():
assert not Pattern(threshold=1).predict(PATTERN)
def test_inverting_threshold():
assert Pattern(1.01, invert_threshold=True).predict(PATTERN)
def test_can_return_float():
assert type(Pattern().predict(PATTERN, return_boolean=False)) != bool
def test_wrong_path_type_raises_exception():
with pytest.raises(TypeError):
assert Pattern().predict(0)
|
python
|
from . import backup
import logging
from .config import DropboxOfflineBackupConfig
def start():
logging.getLogger('').setLevel(logging.DEBUG)
console = logging.StreamHandler()
file_handler = logging.FileHandler(DropboxOfflineBackupConfig().config['DropboxBackup']['LogFile'])
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
console.setFormatter(formatter)
file_handler.setFormatter(formatter)
console.setLevel(logging.DEBUG)
file_handler.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
logging.getLogger('').addHandler(file_handler)
dropbox_logger = logging.getLogger("dropbox")
dropbox_logger.setLevel(logging.WARNING)
requests_logger = logging.getLogger("requests")
requests_logger.setLevel(logging.WARNING)
backup.DropboxOfflineBackup()
|
python
|
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import re
import time
import commonl.testing
import tcfl.tc
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(
config_files = [
# strip to remove the compiled/optimized version -> get source
os.path.join(srcdir, "conf_%s" % os.path.basename(__file__.rstrip('cd')))
],
errors_ignore = [
"Traceback"
])
@tcfl.tc.target(ttbd.url_spec + " and t0")
class _test(tcfl.tc.tc_c):
"""
Exercise the power/sequence call
"""
def eval(self, target):
try:
target.power.sequence()
raise tcfl.tc.failed_e(
"sequence() didn't raise TypeError on having no arguments")
except TypeError as e:
self.report_pass("sequence() complains ok about no arguments")
for bad_argument in [
# yeah, there might be a smarter way to define this
# ugly list
1, 'some string', dict(), 2.0, True, False,
[ ( 'wait' ) ],
[ ( 'wait', None ) ],
# turns out True and False map to 1 and 0 in Python...
#[ ( 'wait', True ) ],
#[ ( 'wait', False ) ],
[ ( 'wait', 'string' ) ],
[ ( 'wait', [ ] ) ],
[ ( 'wait', dict() ) ],
[ ( 'wait', {} ) ],
[ ( 'wait', () ) ],
# on operation
[ ( 'on' ) ],
[ ( 'on', None ) ],
[ ( 'on', True ) ],
[ ( 'on', False ) ],
[ ( 'on', [ ] ) ],
[ ( 'on', dict() ) ],
[ ( 'on', {} ) ],
[ ( 'on', () ) ],
[ ( 'on', 1 ) ],
[ ( 'on', 1.0 ) ],
# off operation
[ ( 'off' ) ],
[ ( 'off', None ) ],
[ ( 'off', True ) ],
[ ( 'off', False ) ],
[ ( 'off', [ ] ) ],
[ ( 'off', dict() ) ],
[ ( 'off', {} ) ],
[ ( 'off', () ) ],
[ ( 'off', 1 ) ],
[ ( 'off', 1.0 ) ],
# cycle operation
[ ( 'cycle' ) ],
[ ( 'cycle', None ) ],
[ ( 'cycle', True ) ],
[ ( 'cycle', False ) ],
[ ( 'cycle', [ ] ) ],
[ ( 'cycle', dict() ) ],
[ ( 'cycle', {} ) ],
[ ( 'cycle', () ) ],
[ ( 'cycle', 1 ) ],
[ ( 'cycle', 1.0 ) ],
# other things
[ ( 'invalid', None ) ],
[ ( 'invalid', True ) ],
[ ( 'invalid', False ) ],
[ ( 'invalid', [ ] ) ],
[ ( 'invalid', dict() ) ],
[ ( 'invalid', {} ) ],
[ ( 'invalid', () ) ],
[ ( 'invalid', 1 ) ],
[ ( 'invalid', 1.0 ) ],
]:
try:
target.power.sequence(bad_argument)
raise tcfl.tc.failed_e(
"sequence() didn't raise error on bad argument %s"
% bad_argument)
except tcfl.tc.error_e as e:
self.report_pass(
"server's sequence() complains ok about bad argument %s"
% bad_argument, dict(exception = e))
# "t0: power/sequence: remote call failed: 400: t0: t0: sequence #0: invalid type: expected list; got <type 'unicode'>"
# ->
# "invalid type: expected list; got <type 'unicode'>"
#
# so we can ignore it in the server's error log, which
# doesn't contain the part we are removing (added by
# the client)
ttbd.errors_ignore.append(
re.sub("^.*remote call failed.*sequence", "",
str(e.args[0])))
# now test basic operation
ts0 = time.time()
target.power.sequence([ ( 'wait', 3 ) ])
ts = time.time()
if ts - ts0 < 3:
raise tcfl.tc.failed_e(
"wait 3s took less than 3 seconds (%.1fs)" % ts-ts0)
if ts - ts0 > 3.5:
raise tcfl.tc.failed_e(
"wait 3s took less than 3.5 seconds (%.1fs)" % ts-ts0)
# now test basic operation
ts0 = time.time()
target.power.sequence([ ( 'wait', 3 ) ])
ts = time.time()
if ts - ts0 < 3:
raise tcfl.tc.failed_e(
"wait 3s took less than 3 seconds (%.1fs)" % ts-ts0)
if ts - ts0 > 3.5:
raise tcfl.tc.failed_e(
"wait 3s took less than 3.5 seconds (%.1fs)" % ts-ts0)
target.power.sequence([ ( 'on', 'power0' ) ])
state, substate, components = target.power.list()
assert components['power0']['state'] == True, components
target.power.sequence([ ( 'off', 'power0' ) ])
state, substate, components = target.power.list()
assert components['power0']['state'] == False, components
target.power.sequence([ ( 'cycle', 'power0' ) ])
state, substate, components = target.power.list()
assert components['power0']['state'] == True, components
target.power.sequence([ ( 'off', 'all' ) ])
state, substate, components = target.power.list()
assert state == False, ( state, substate, components )
assert substate == 'full', ( state, substate, components )
target.power.sequence([ ( 'on', 'all' ) ])
state, substate, components = target.power.list()
assert state == True, ( state, substate, components )
assert substate == 'full', ( state, substate, components )
target.power.sequence([
( 'off', 'all' ),
( 'wait', 1 ),
( 'on', 'power0' ),
( 'on', 'power1' ),
( 'on', 'power2' ),
( 'off', 'power1' ),
])
state, substate, components = target.power.list()
assert components['power0']['state'] == True, components
assert components['power1']['state'] == False, components
assert components['power2']['state'] == True, components
def teardown_90_scb(self):
ttbd.check_log_for_issues(self)
|
python
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/FamilyMemberHistory
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import familymemberhistory
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class FamilyMemberHistoryTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("FamilyMemberHistory", js["resourceType"])
return familymemberhistory.FamilyMemberHistory(js)
def testFamilyMemberHistory1(self):
inst = self.instantiate_from("familymemberhistory-example.json")
self.assertIsNotNone(
inst, "Must have instantiated a FamilyMemberHistory instance"
)
self.implFamilyMemberHistory1(inst)
js = inst.as_json()
self.assertEqual("FamilyMemberHistory", js["resourceType"])
inst2 = familymemberhistory.FamilyMemberHistory(js)
self.implFamilyMemberHistory1(inst2)
def implFamilyMemberHistory1(self, inst):
self.assertEqual(
force_bytes(inst.condition[0].code.coding[0].code), force_bytes("315619001")
)
self.assertEqual(
force_bytes(inst.condition[0].code.coding[0].display),
force_bytes("Myocardial Infarction"),
)
self.assertEqual(
force_bytes(inst.condition[0].code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.condition[0].code.text), force_bytes("Heart Attack")
)
self.assertTrue(inst.condition[0].contributedToDeath)
self.assertEqual(
force_bytes(inst.condition[0].note[0].text),
force_bytes(
"Was fishing at the time. At least he went doing someting he loved."
),
)
self.assertEqual(force_bytes(inst.condition[0].onsetAge.code), force_bytes("a"))
self.assertEqual(
force_bytes(inst.condition[0].onsetAge.system),
force_bytes("http://unitsofmeasure.org"),
)
self.assertEqual(
force_bytes(inst.condition[0].onsetAge.unit), force_bytes("yr")
)
self.assertEqual(inst.condition[0].onsetAge.value, 74)
self.assertEqual(inst.date.date, FHIRDate("2011-03-18").date)
self.assertEqual(inst.date.as_json(), "2011-03-18")
self.assertEqual(force_bytes(inst.id), force_bytes("father"))
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("12345"))
self.assertEqual(
force_bytes(inst.instantiatesUri[0]),
force_bytes("http://example.org/family-member-history-questionnaire"),
)
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(
force_bytes(inst.relationship.coding[0].code), force_bytes("FTH")
)
self.assertEqual(
force_bytes(inst.relationship.coding[0].display), force_bytes("father")
)
self.assertEqual(
force_bytes(inst.relationship.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-RoleCode"),
)
self.assertEqual(force_bytes(inst.sex.coding[0].code), force_bytes("male"))
self.assertEqual(force_bytes(inst.sex.coding[0].display), force_bytes("Male"))
self.assertEqual(
force_bytes(inst.sex.coding[0].system),
force_bytes("http://hl7.org/fhir/administrative-gender"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(
force_bytes(inst.text.div),
force_bytes(
'<div xmlns="http://www.w3.org/1999/xhtml">Father died of a heart attack aged 74</div>'
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testFamilyMemberHistory2(self):
inst = self.instantiate_from("familymemberhistory-example-mother.json")
self.assertIsNotNone(
inst, "Must have instantiated a FamilyMemberHistory instance"
)
self.implFamilyMemberHistory2(inst)
js = inst.as_json()
self.assertEqual("FamilyMemberHistory", js["resourceType"])
inst2 = familymemberhistory.FamilyMemberHistory(js)
self.implFamilyMemberHistory2(inst2)
def implFamilyMemberHistory2(self, inst):
self.assertEqual(
force_bytes(inst.condition[0].code.coding[0].code), force_bytes("371041009")
)
self.assertEqual(
force_bytes(inst.condition[0].code.coding[0].display),
force_bytes("Embolic Stroke"),
)
self.assertEqual(
force_bytes(inst.condition[0].code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.condition[0].code.text), force_bytes("Stroke")
)
self.assertEqual(force_bytes(inst.condition[0].onsetAge.code), force_bytes("a"))
self.assertEqual(
force_bytes(inst.condition[0].onsetAge.system),
force_bytes("http://unitsofmeasure.org"),
)
self.assertEqual(
force_bytes(inst.condition[0].onsetAge.unit), force_bytes("yr")
)
self.assertEqual(inst.condition[0].onsetAge.value, 56)
self.assertEqual(force_bytes(inst.id), force_bytes("mother"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(
force_bytes(inst.relationship.coding[0].code), force_bytes("MTH")
)
self.assertEqual(
force_bytes(inst.relationship.coding[0].display), force_bytes("mother")
)
self.assertEqual(
force_bytes(inst.relationship.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-RoleCode"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(
force_bytes(inst.text.div),
force_bytes(
'<div xmlns="http://www.w3.org/1999/xhtml">Mother died of a stroke aged 56</div>'
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
|
python
|
# -*- coding: utf-8 -*-
# Time : 2021/7/25 13:59
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
import json
import os
from datetime import datetime
from bs4 import BeautifulSoup
from selenium.common.exceptions import (
StaleElementReferenceException,
WebDriverException,
)
from selenium.webdriver import Chrome
from src.BusinessCentralLayer.setting import logger, SERVER_DIR_DATABASE, TIME_ZONE_CN
from src.BusinessLogicLayer.cluster.master import ActionMasterGeneral
class SSPanelParser(ActionMasterGeneral):
def __init__(self, url, silence=False, assault=True, anti_slider=True):
super(SSPanelParser, self).__init__(
url,
silence,
assault,
anti_slider=anti_slider,
)
self.obj_parser = {}
self.cache_db_name = "parser_cache"
self.cache_db_path = self.create_cache_db(database_dir=SERVER_DIR_DATABASE)
def create_cache_db(self, database_dir=None):
database_dir = "database" if database_dir is None else database_dir
if not os.path.exists(database_dir):
os.mkdir(database_dir)
cache_db = os.path.join(database_dir, self.cache_db_name)
if not os.path.exists(cache_db):
os.mkdir(cache_db)
return cache_db
def capture_cache(self, signs, flow):
output_path = os.path.join(self.cache_db_path, signs)
with open(output_path, "w", encoding="utf8") as f:
f.write(flow)
def parse(self, **kwargs):
"""
:return:
"""
api: Chrome = kwargs.get("api")
self.obj_parser.update({"parse_url": self.register_url})
# ----------------------------------------
        # Parse the available traffic and the remaining usable time.
        # Called first: wait for the fluid counter animation to finish
        # loading (a slow task) so later parsing needs no extra waits.
# ----------------------------------------
fluid = set()
fluid_density = []
i = 0
while True:
try:
i += 1
card_body = api.find_elements_by_xpath("//div[@class='card-body']")[:2]
card_body = [tag.text.strip() for tag in card_body]
fluid.update(card_body)
fluid_density.append(len(fluid))
                # Fluid counters still loading: keep collecting readings
if len(fluid_density) < 10 or len(fluid) < 3:
continue
                # Fluid readings are relatively stable
if max(fluid_density[:10]) == min(fluid_density[:10]):
self.obj_parser.update(
{"time": card_body[0], "flow": card_body[-1]}
)
break
except StaleElementReferenceException:
pass
        # Save cookies
with open("123.json", "w", encoding="utf8") as f:
f.write(json.dumps(api.get_cookies()))
        # Read cookies back (example kept commented out)
# cookie_json = " ".join([f"{i['name']}={i['value']};" for i in json.loads(f.read())])
# ----------------------------------------
        # Parse the site name
# ----------------------------------------
try:
parse_name = api.find_element_by_xpath(
"//aside//div[@class='sidebar-brand']"
).text.strip()
self.obj_parser.update({"parse_name": parse_name})
except WebDriverException:
logger.error(
f"<SSPanelParserError> Site name resolution failed -- {self.register_url}"
)
# ----------------------------------------
        # Parse the site announcement
# ----------------------------------------
reference_links = {}
try:
card_body = api.find_elements_by_xpath("//div[@class='card-body']")[4]
self.obj_parser.update({"desc": card_body.text.strip()})
related_href = card_body.find_elements_by_tag_name("a")
for tag in related_href:
href = tag.get_attribute("href")
if href:
href = href.strip()
if "https" not in href:
href = f"{self.register_url}{href}"
href_desc = tag.text.strip() if tag.text else href
reference_links.update({href: href_desc})
self.obj_parser.update({"reference_links": reference_links})
except WebDriverException:
logger.error(
f"<SSPanelParserError> Site announcement parsing error -- {self.register_url}"
)
# ----------------------------------------
        # Parse the subscription (link import) section
# ----------------------------------------
subscribes = {}
support = []
try:
            # Clean up the subscription links
soup = BeautifulSoup(api.page_source, "html.parser")
for i in soup.find_all("a"):
if i.get("data-clipboard-text"):
subscribes.update({i.get("data-clipboard-text"): i.text.strip()})
            # Identify the supported subscription types
buttons = api.find_elements_by_xpath("//div[@class='card'][2]//a")
for tag in buttons:
support_ = tag.get_attribute("class")
if support_:
support_ = [
i
for i in [i for i in support_.split() if i.startswith("btn-")]
if i
not in [
"btn-icon",
"btn-primary",
"btn-lg",
"btn-round",
"btn-progress",
]
]
if len(support_) == 1:
class_name = support_[0].replace("btn-", "")
support.append(class_name)
            # Fill in any types missed above from the link labels
for tag in subscribes.values():
if "surge" in tag.lower():
support.append("surge")
if "ssr" in tag.lower():
support.append("ssr")
self.obj_parser.update(
{"subscribes": subscribes, "support": list(set(support))}
)
except WebDriverException:
logger.error(
f"<SSPanelParserError> Site subscription resolution failed -- {self.register_url}"
)
self.obj_parser.update(
{
"email": self.email,
"password": self.password,
"recently_login": datetime.now(tz=TIME_ZONE_CN),
}
)
return self.obj_parser
def parse_by_login(self, **kwargs) -> dict:
return self.seep("login", self.parse, **kwargs)
def parse_by_register(self, **kwargs):
return self.seep("register", self.parse, **kwargs)
def refresh_cookie(self, **kwargs):
def get_cookie():
cookies = kwargs.get("api")
return json.dumps(cookies.get_cookies()) if cookies else {}
return self.seep("login", get_cookie, **kwargs)
def seep(self, method, business, **kwargs):
        # Fetch the spider/task options
api = self.set_spider_option()
        # Run the core business logic
try:
self.get_html_handle(api=api, url=self.register_url, wait_seconds=45)
if method == "login":
self.sign_in(api, **kwargs)
elif method == "register":
self.sign_up(api)
self.wait(api, 40, "//div[@class='card-body']")
kwargs.setdefault("api", api)
return business(**kwargs)
finally:
api.quit()
|
python
|
import stweet as st
def test_return_tweets_objects():
phrase = '#koronawirus'
search_tweets_task = st.SearchTweetsTask(
all_words=phrase,
tweets_count=200
)
tweets_collector = st.CollectorTweetOutput()
result = st.TweetSearchRunner(
search_tweets_task=search_tweets_task,
tweet_outputs=[tweets_collector]
).run()
scrapped_tweets = tweets_collector.get_scrapped_tweets()
assert isinstance(result, st.SearchTweetsResult)
assert result.downloaded_count == len(scrapped_tweets)
assert result.downloaded_count > 0
assert all([phrase in it.full_text for it in scrapped_tweets if phrase in it.full_text]) is True
|
python
|
import lmfit
import numpy as np
from uncertainties import ufloat
from scipy.stats import sem
from collections import OrderedDict
from pycqed.analysis import fitting_models as fit_mods
from pycqed.analysis import analysis_toolbox as a_tools
import pycqed.analysis_v2.base_analysis as ba
from pycqed.analysis.tools.plotting import SI_val_to_msg_str
from pycqed.utilities.general import format_value_string
from copy import deepcopy
from pycqed.analysis.tools.data_manipulation import \
populations_using_rate_equations
class Single_Qubit_TimeDomainAnalysis(ba.BaseDataAnalysis):
def process_data(self):
'''
This takes care of rotating and normalizing the data if required.
this should work for several input types.
- I/Q values (2 quadratures + cal points)
- weight functions (1 quadrature + cal points)
- counts (no cal points)
There are several options possible to specify the normalization
using the options dict.
cal_points (tuple) of indices of the calibration points
zero_coord, one_coord
'''
cal_points = self.options_dict.get('cal_points', None)
zero_coord = self.options_dict.get('zero_coord', None)
one_coord = self.options_dict.get('one_coord', None)
# FIXME THIS IS A HACK related to recent issue
self.data_dict = self.raw_data_dict
if cal_points is None:
# default for all standard Timedomain experiments
cal_points = [list(range(-4, -2)), list(range(-2, 0))]
if len(self.raw_data_dict['measured_values']) == 1:
# if only one weight function is used rotation is not required
self.proc_data_dict['corr_data'] = a_tools.normalize_data_v3(
self.raw_data_dict['measured_values'][0],
cal_zero_points=cal_points[0],
cal_one_points=cal_points[1])
else:
self.proc_data_dict['corr_data'], zero_coord, one_coord = \
a_tools.rotate_and_normalize_data(
data=self.raw_data_dict['measured_values'][0:2],
zero_coord=zero_coord,
one_coord=one_coord,
cal_zero_points=cal_points[0],
cal_one_points=cal_points[1])
# This should be added to the hdf5 datafile but cannot because of the
# way that the "new" analysis works.
# self.add_dataset_to_analysisgroup('Corrected data',
# self.proc_data_dict['corr_data'])
class Idling_Error_Rate_Analyisis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
post_sel_th = self.options_dict.get('post_sel_th', 0.5)
raw_shots = self.raw_data_dict['measured_values'][0][0]
post_sel_shots = raw_shots[::2]
data_shots = raw_shots[1::2]
data_shots[np.where(post_sel_shots > post_sel_th)] = np.nan
states = ['0', '1', '+']
self.proc_data_dict['xvals'] = np.unique(self.raw_data_dict['xvals'])
for i, state in enumerate(states):
self.proc_data_dict['shots_{}'.format(state)] = data_shots[i::3]
self.proc_data_dict['yvals_{}'.format(state)] = \
np.nanmean(np.reshape(self.proc_data_dict['shots_{}'.format(state)],
(len(self.proc_data_dict['xvals']), -1),
order='F'), axis=1)
def prepare_plots(self):
# assumes that value names are unique in an experiment
states = ['0', '1', '+']
for i, state in enumerate(states):
yvals = self.proc_data_dict['yvals_{}'.format(state)]
xvals = self.proc_data_dict['xvals']
self.plot_dicts['Prepare in {}'.format(state)] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': xvals,
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': yvals,
'ylabel': 'Counts',
'yrange': [0, 1],
'xrange': self.options_dict.get('xrange', None),
'yunit': 'frac',
'setlabel': 'Prepare in {}'.format(state),
'do_legend': True,
'title': (self.raw_data_dict['timestamps'][0]+' - ' +
self.raw_data_dict['timestamps'][-1] + '\n' +
self.raw_data_dict['measurementstring'][0]),
'legend_pos': 'upper right'}
if self.do_fitting:
for state in ['0', '1', '+']:
self.plot_dicts['fit_{}'.format(state)] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['fit {}'.format(state)]['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'fit |{}>'.format(state),
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['fit_text'] = {
'ax_id': 'main',
'box_props': 'fancy',
'xpos': 1.05,
'horizontalalignment': 'left',
'plotfn': self.plot_text,
'text_string': self.proc_data_dict['fit_msg']}
def analyze_fit_results(self):
fit_msg = ''
states = ['0', '1', '+']
for state in states:
fr = self.fit_res['fit {}'.format(state)]
            fit_msg += 'Prep |{}> :\n\t'.format(state)
fit_msg += format_value_string('$N_1$',
fr.params['N1'], end_char='\n\t')
fit_msg += format_value_string('$N_2$',
fr.params['N2'], end_char='\n')
self.proc_data_dict['fit_msg'] = fit_msg
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
states = ['0', '1', '+']
for i, state in enumerate(states):
yvals = self.proc_data_dict['yvals_{}'.format(state)]
xvals = self.proc_data_dict['xvals']
mod = lmfit.Model(fit_mods.idle_error_rate_exp_decay)
mod.guess = fit_mods.idle_err_rate_guess.__get__(
mod, mod.__class__)
# Done here explicitly so that I can overwrite a specific guess
guess_pars = mod.guess(N=xvals, data=yvals)
vary_N2 = self.options_dict.get('vary_N2', True)
if not vary_N2:
guess_pars['N2'].value = 1e21
guess_pars['N2'].vary = False
# print(guess_pars)
self.fit_dicts['fit {}'.format(states[i])] = {
'model': mod,
'fit_xvals': {'N': xvals},
'fit_yvals': {'data': yvals},
'guess_pars': guess_pars}
# Allows fixing the double exponential coefficient
class Grovers_TwoQubitAllStates_Analysis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
close_figs: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
close_figs=close_figs,
extract_only=extract_only, do_fitting=True)
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
normalize_to_cal_points = self.options_dict.get(
'normalize_to_cal_points', True)
cal_points = [
[[-4, -3], [-2, -1]],
[[-4, -2], [-3, -1]],
]
for idx in [0, 1]:
yvals = list(self.raw_data_dict['measured_values_ord_dict'].values())[
idx][0]
self.proc_data_dict['ylabel_{}'.format(idx)] = \
self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
if normalize_to_cal_points:
yvals = a_tools.normalize_data_v3(yvals,
cal_zero_points=cal_points[idx][0],
cal_one_points=cal_points[idx][1])
self.proc_data_dict['yvals_{}'.format(idx)] = yvals
y0 = self.proc_data_dict['yvals_0']
y1 = self.proc_data_dict['yvals_1']
p_success = ((y0[0]*y1[0]) +
(1-y0[1])*y1[1] +
(y0[2])*(1-y1[2]) +
(1-y0[3])*(1-y1[3]))/4
print(y0[0]*y1[0])
print((1-y0[1])*y1[1])
print((y0[2])*(1-y1[2]))
print((1-y0[3])*(1-y1[3]))
self.proc_data_dict['p_success'] = p_success
def prepare_plots(self):
# assumes that value names are unique in an experiment
for i in [0, 1]:
yvals = self.proc_data_dict['yvals_{}'.format(i)]
xvals = self.raw_data_dict['xvals'][0]
ylabel = self.proc_data_dict['ylabel_{}'.format(i)]
self.plot_dicts['main_{}'.format(ylabel)] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['xvals'][0],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_{}'.format(i)],
'ylabel': ylabel,
'yunit': self.proc_data_dict['yunit'],
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': False,
'legend_pos': 'upper right'}
self.plot_dicts['limit_text'] = {
'ax_id': 'main_{}'.format(ylabel),
'box_props': 'fancy',
'xpos': 1.05,
'horizontalalignment': 'left',
'plotfn': self.plot_text,
            'text_string': 'P success = {:.3f}'.format(self.proc_data_dict['p_success'])}
class FlippingAnalysis(Single_Qubit_TimeDomainAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = True
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'measurementstring': 'measurementstring',
'sweep_points': 'sweep_points',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
# This analysis makes a hardcoded assumption on the calibration points
self.options_dict['cal_points'] = [list(range(-4, -2)),
list(range(-2, 0))]
self.numeric_params = []
if auto:
self.run_analysis()
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
# Even though we expect an exponentially damped oscillation we use
# a simple cosine as this gives more reliable fitting and we are only
# interested in extracting the frequency of the oscillation
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.raw_data_dict['sweep_points'][:-4],
data=self.proc_data_dict['corr_data'][:-4])
# This enforces the oscillation to start at the equator
# and ensures that any over/under rotation is absorbed in the
# frequency
guess_pars['amplitude'].value = 0.5
guess_pars['amplitude'].vary = True
guess_pars['offset'].value = 0.5
guess_pars['offset'].vary = True
self.fit_dicts['cos_fit'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.raw_data_dict['sweep_points'][:-4]},
'fit_yvals': {'data': self.proc_data_dict['corr_data'][:-4]},
'guess_pars': guess_pars}
# In the case there are very few periods we fall back on a small
# angle approximation to extract the drive detuning
poly_mod = lmfit.models.PolynomialModel(degree=1)
        # the detuning can be estimated using a small angle approximation
# c1 = d/dN (cos(2*pi*f N) ) evaluated at N = 0 -> c1 = -2*pi*f
poly_mod.set_param_hint('frequency', expr='-c1/(2*pi)')
guess_pars = poly_mod.guess(x=self.raw_data_dict['sweep_points'][:-4],
data=self.proc_data_dict['corr_data'][:-4])
# Constraining the line ensures that it will only give a good fit
# if the small angle approximation holds
guess_pars['c0'].vary = True
guess_pars['c0'].value = 0.5
self.fit_dicts['line_fit'] = {
'model': poly_mod,
'fit_xvals': {'x': self.raw_data_dict['sweep_points'][:-4]},
'fit_yvals': {'data': self.proc_data_dict['corr_data'][:-4]},
'guess_pars': guess_pars}
def analyze_fit_results(self):
sf_line = self._get_scale_factor_line()
sf_cos = self._get_scale_factor_cos()
self.proc_data_dict['scale_factor'] = self.get_scale_factor()
msg = 'Scale fact. based on '
if self.proc_data_dict['scale_factor'] == sf_cos:
msg += 'cos fit\n'
else:
msg += 'line fit\n'
msg += 'cos fit: {:.4f}\n'.format(sf_cos)
msg += 'line fit: {:.4f}'.format(sf_line)
self.raw_data_dict['scale_factor_msg'] = msg
# TODO: save scale factor to file
def get_scale_factor(self):
"""
Returns the scale factor that should correct for the error in the
pulse amplitude.
"""
# Model selection based on the Bayesian Information Criterion (BIC)
# as calculated by lmfit
if (self.fit_dicts['line_fit']['fit_res'].bic <
self.fit_dicts['cos_fit']['fit_res'].bic):
scale_factor = self._get_scale_factor_line()
else:
scale_factor = self._get_scale_factor_cos()
return scale_factor
def _get_scale_factor_cos(self):
# 1/period of the oscillation corresponds to the (fractional)
# over/under rotation error per gate
frequency = self.fit_dicts['cos_fit']['fit_res'].params['frequency']
# the square is needed to account for the difference between
# power and amplitude
scale_factor = (1+frequency)**2
phase = np.rad2deg(
self.fit_dicts['cos_fit']['fit_res'].params['phase']) % 360
# phase ~90 indicates an under rotation so the scale factor
# has to be larger than 1. A phase ~270 indicates an over
# rotation so then the scale factor has to be smaller than one.
if phase > 180:
scale_factor = 1/scale_factor
return scale_factor
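    # Worked example (illustrative numbers, not from a measurement): a fitted
    # frequency of 0.01 corresponds to ~1% of a period of rotation error per
    # gate, so scale_factor = (1 + 0.01)**2 ~= 1.020. With phase ~90 deg
    # (under rotation) the factor stays > 1; with phase ~270 deg (over
    # rotation) it is inverted to ~0.980, reducing the pulse amplitude.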
def _get_scale_factor_line(self):
# 2/period (ref is 180 deg) of the oscillation corresponds
# to the (fractional) over/under rotation error per gate
frequency = self.fit_dicts['line_fit']['fit_res'].params['frequency']
scale_factor = (1+2*frequency)**2
# no phase sign check is needed here as this is contained in the
# sign of the coefficient
return scale_factor
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['sweep_points'],
'xlabel': self.raw_data_dict['xlabel'],
'xunit': self.raw_data_dict['xunit'], # does not do anything yet
'yvals': self.proc_data_dict['corr_data'],
'ylabel': 'Excited state population',
'yunit': '',
'setlabel': 'data',
'title': (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring']),
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'line fit',
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['cos_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['text_msg'] = {
'ax_id': 'main',
'ypos': 0.15,
'plotfn': self.plot_text,
'box_props': 'fancy',
'text_string': self.raw_data_dict['scale_factor_msg']}
class Intersect_Analysis(Single_Qubit_TimeDomainAnalysis):
"""
Analysis to extract the intercept of two parameters.
relevant options_dict parameters
ch_idx_A (int) specifies first channel for intercept
ch_idx_B (int) specifies second channel for intercept if same as first
it will assume data was taken interleaved.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True,
normalized_probability=False):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.normalized_probability = normalized_probability
self.params_dict = {'xlabel': 'sweep_name',
'xvals': 'sweep_points',
'xunit': 'sweep_unit',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx_A" and "ch_idx_B"
specified in the options dict. If ch_idx_A and ch_idx_B are the same
it will unzip the data.
"""
self.proc_data_dict = deepcopy(self.raw_data_dict)
# The channel containing the data must be specified in the options dict
ch_idx_A = self.options_dict.get('ch_idx_A', 0)
ch_idx_B = self.options_dict.get('ch_idx_B', 0)
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][ch_idx_A]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][ch_idx_A]
if ch_idx_A == ch_idx_B:
yvals = list(self.raw_data_dict['measured_values_ord_dict'].values())[
ch_idx_A][0]
self.proc_data_dict['xvals_A'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_B'] = self.raw_data_dict['xvals'][0][1::2]
self.proc_data_dict['yvals_A'] = yvals[::2]
self.proc_data_dict['yvals_B'] = yvals[1::2]
else:
self.proc_data_dict['xvals_A'] = self.raw_data_dict['xvals'][0]
self.proc_data_dict['xvals_B'] = self.raw_data_dict['xvals'][0]
self.proc_data_dict['yvals_A'] = list(self.raw_data_dict
['measured_values_ord_dict'].values())[ch_idx_A][0]
self.proc_data_dict['yvals_B'] = list(self.raw_data_dict
['measured_values_ord_dict'].values())[ch_idx_B][0]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.fit_dicts['line_fit_A'] = {
'model': lmfit.models.PolynomialModel(degree=2),
'fit_xvals': {'x': self.proc_data_dict['xvals_A']},
'fit_yvals': {'data': self.proc_data_dict['yvals_A']}}
self.fit_dicts['line_fit_B'] = {
'model': lmfit.models.PolynomialModel(degree=2),
'fit_xvals': {'x': self.proc_data_dict['xvals_B']},
'fit_yvals': {'data': self.proc_data_dict['yvals_B']}}
def analyze_fit_results(self):
fr_0 = self.fit_res['line_fit_A'].best_values
fr_1 = self.fit_res['line_fit_B'].best_values
c0 = (fr_0['c0'] - fr_1['c0'])
c1 = (fr_0['c1'] - fr_1['c1'])
c2 = (fr_0['c2'] - fr_1['c2'])
poly_coeff = [c0, c1, c2]
poly = np.polynomial.polynomial.Polynomial([fr_0['c0'],
fr_0['c1'], fr_0['c2']])
ic = np.polynomial.polynomial.polyroots(poly_coeff)
self.proc_data_dict['intersect_L'] = ic[0], poly(ic[0])
self.proc_data_dict['intersect_R'] = ic[1], poly(ic[1])
if (((np.min(self.proc_data_dict['xvals'])) < ic[0]) and
(ic[0] < (np.max(self.proc_data_dict['xvals'])))):
self.proc_data_dict['intersect'] = self.proc_data_dict['intersect_L']
else:
self.proc_data_dict['intersect'] = self.proc_data_dict['intersect_R']
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_A'],
'xlabel': self.proc_data_dict['xlabel'][0],
'xunit': self.proc_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_A'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'A',
'title': (self.proc_data_dict['timestamps'][0] + ' \n' +
self.proc_data_dict['measurementstring'][0]),
'do_legend': True,
'legend_pos': 'upper right'}
if self.normalized_probability:
self.plot_dicts['main']['yrange'] = (0, 1)
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_B'],
'xlabel': self.proc_data_dict['xlabel'][0],
'xunit': self.proc_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_B'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'B',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit_A'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_A']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit A',
'do_legend': True}
self.plot_dicts['line_fit_B'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_B']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit B',
'do_legend': True}
ic, ic_unit = SI_val_to_msg_str(
self.proc_data_dict['intersect'][0],
self.proc_data_dict['xunit'][0][0], return_type=float)
self.plot_dicts['intercept_message'] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': [self.proc_data_dict['intersect'][0]],
'yvals': [self.proc_data_dict['intersect'][1]],
'line_kws': {'alpha': .5, 'color': 'gray',
'markersize': 15},
'marker': 'o',
'setlabel': 'Intercept: {:.3f} {}'.format(ic, ic_unit),
'do_legend': True}
def get_intersect(self):
return self.proc_data_dict['intersect']
class CZ_1QPhaseCal_Analysis(ba.BaseDataAnalysis):
"""
Analysis to extract the intercept for a single qubit phase calibration
experiment
N.B. this is a less generic version of "Intersect_Analysis" and should
be deprecated (MAR Dec 2017)
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx" in options dict and
        then splits the data for the off and on cases
"""
self.proc_data_dict = OrderedDict()
# The channel containing the data must be specified in the options dict
ch_idx = self.options_dict['ch_idx']
yvals = list(self.raw_data_dict['measured_values_ord_dict'].values())[
ch_idx][0]
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][ch_idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][ch_idx]
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
self.proc_data_dict['yvals_off'] = yvals[::2]
self.proc_data_dict['yvals_on'] = yvals[1::2]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.fit_dicts['line_fit_off'] = {
'model': lmfit.models.PolynomialModel(degree=1),
'fit_xvals': {'x': self.proc_data_dict['xvals_off']},
'fit_yvals': {'data': self.proc_data_dict['yvals_off']}}
self.fit_dicts['line_fit_on'] = {
'model': lmfit.models.PolynomialModel(degree=1),
'fit_xvals': {'x': self.proc_data_dict['xvals_on']},
'fit_yvals': {'data': self.proc_data_dict['yvals_on']}}
def analyze_fit_results(self):
fr_0 = self.fit_res['line_fit_off'].best_values
fr_1 = self.fit_res['line_fit_on'].best_values
ic = -(fr_0['c0'] - fr_1['c0'])/(fr_0['c1'] - fr_1['c1'])
self.proc_data_dict['zero_phase_diff_intersect'] = ic
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_off'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
'yrange': (0, 1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_on'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit_off'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_off']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ off',
'do_legend': True}
self.plot_dicts['line_fit_on'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_on']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ on',
'do_legend': True}
ic, ic_unit = SI_val_to_msg_str(
self.proc_data_dict['zero_phase_diff_intersect'],
self.raw_data_dict['xunit'][0][0], return_type=float)
self.plot_dicts['intercept_message'] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': [self.proc_data_dict['zero_phase_diff_intersect']],
'yvals': [np.mean(self.proc_data_dict['xvals_on'])],
'line_kws': {'alpha': 0},
'setlabel': 'Intercept: {:.1f} {}'.format(ic, ic_unit),
'do_legend': True}
def get_zero_phase_diff_intersect(self):
return self.proc_data_dict['zero_phase_diff_intersect']
class Oscillation_Analysis(ba.BaseDataAnalysis):
"""
Very basic analysis to determine the phase of a single oscillation
that has an assumed period of 360 degrees.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
label: str='',
ch_idx: int=0,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.ch_idx = ch_idx
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
idx = self.ch_idx
normalize_to_cal_points = self.options_dict.get(
'normalize_to_cal_points', False)
cal_points = [
[[-4, -3], [-2, -1]],
[[-4, -2], [-3, -1]],
]
yvals = list(
self.raw_data_dict['measured_values_ord_dict'].values())[idx][0]
if normalize_to_cal_points:
yvals = a_tools.normalize_data_v3(
yvals, cal_zero_points=cal_points[idx][0],
cal_one_points=cal_points[idx][1])
self.proc_data_dict['yvals'] = yvals
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
cos_mod = lmfit.Model(fit_mods.CosFunc)
cos_mod.guess = fit_mods.Cos_guess.__get__(cos_mod, cos_mod.__class__)
if not (self.options_dict.get('normalize_to_cal_points', False)):
t = self.raw_data_dict['xvals'][0]
data = self.proc_data_dict['yvals']
else:
t = self.raw_data_dict['xvals'][0][:-4]
data = self.proc_data_dict['yvals'][:-4]
self.fit_dicts['cos_fit'] = {
'model': cos_mod,
'guess_dict': {'frequency': {'value': 1/360, 'vary': False}},
'fit_xvals': {'t': t},
'fit_yvals': {'data': data}}
def analyze_fit_results(self):
fr = self.fit_res['cos_fit'].best_values
self.proc_data_dict['phi'] = np.rad2deg(fr['phase'])
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['xvals'][0],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['cos_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit',
'do_legend': True}
class Conditional_Oscillation_Analysis(ba.BaseDataAnalysis):
"""
Analysis to extract quantities from a conditional oscillation.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
label: str='',
options_dict: dict=None, extract_only: bool=False,
cal_points='gef',
close_figs: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
close_figs=close_figs,
extract_only=extract_only, do_fitting=True)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
# either "gef" or "ge"
self.cal_points = cal_points
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx_osc" and
"ch_idx_spec" in the options dict and then splits the data for the
off and on cases
"""
self.proc_data_dict = OrderedDict()
# values stored in quantities of interest will be saved in the data file
self.proc_data_dict['quantities_of_interest'] = {}
qoi = self.proc_data_dict['quantities_of_interest']
# The channel containing the data must be specified in the options dict
ch_idx_spec = self.options_dict.get('ch_idx_spec', 0)
ch_idx_osc = self.options_dict.get('ch_idx_osc', 1)
qoi['ch_idx_osc'] = ch_idx_osc
qoi['ch_idx_spec'] = ch_idx_spec
normalize_to_cal_points = self.options_dict.get(
'normalize_to_cal_points', True)
if self.cal_points == 'gef':
# calibration point indices are when ignoring the f-state cal pts
cal_points = [
[[-7, -6], [-5, -4], [-2, -1]], # oscillating qubit
[[-7, -5], [-6, -4], [-3, -1]], # spec qubits
]
elif self.cal_points == 'ge':
# calibration point indices are when ignoring the f-state cal pts
cal_points = [
[[-4, -3], [-2, -1]], # oscillating qubits
[[-4, -2], [-3, -1]], # spec qubit
]
for idx, type_str in zip([ch_idx_osc, ch_idx_spec], ['osc', 'spec']):
yvals = list(self.raw_data_dict['measured_values_ord_dict'].values())[
idx][0]
self.proc_data_dict['ylabel_{}'.format(
type_str)] = self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
# This is in case of readout crosstalk making a difference between on and off cases
cals_osc_qubit = cal_points[0]
idx_cal_off = [c[1] for c in cals_osc_qubit]
idx_cal_on = [c[0] for c in cals_osc_qubit]
yvals_off = np.concatenate((yvals[:cals_osc_qubit[0][0]:2],
yvals[idx_cal_off]))
yvals_on = np.concatenate((yvals[1:cals_osc_qubit[0][0]:2],
yvals[idx_cal_on]))
if normalize_to_cal_points:
yvals_off = a_tools.normalize_TD_data(
data=yvals_off,
data_zero=yvals[cals_osc_qubit[0][1]],
data_one=yvals[cals_osc_qubit[1][1]])
yvals_on = a_tools.normalize_TD_data(
data=yvals_on,
data_zero=yvals[cals_osc_qubit[0][0]],
data_one=yvals[cals_osc_qubit[1][0]])
self.proc_data_dict['yvals_{}_off'.format(
type_str)] = yvals_off
self.proc_data_dict['yvals_{}_on'.format(
type_str)] = yvals_on
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
else:
self.proc_data_dict['yvals_{}_off'.format(
type_str)] = yvals[::2]
self.proc_data_dict['yvals_{}_on'.format(
type_str)] = yvals[1::2]
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
V0 = np.mean(yvals[cal_points[idx][0]])
V1 = np.mean(yvals[cal_points[idx][1]])
if self.cal_points != 'gef':
                V2 = V1  # np.mean(yvals[cal_points[idx][2]])
else:
V2 = V1
self.proc_data_dict['V0_{}'.format(type_str)] = V0
self.proc_data_dict['V1_{}'.format(type_str)] = V1
self.proc_data_dict['V2_{}'.format(type_str)] = V2
if type_str == 'osc':
# The offset in the oscillation is the leakage indicator
SI = [np.mean(self.proc_data_dict[
'yvals_{}_on'.format(type_str)])]
# The mean of the oscillation SI is the same as SX
SX = SI
P0, P1, P2, M_inv = populations_using_rate_equations(
SI, SX, V0, V1, V2)
# Leakage based on the average of the oscillation
qoi['leak_avg'] = P2[0] # list with 1 elt...
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
cos_mod0 = lmfit.Model(fit_mods.CosFunc)
cos_mod0.guess = fit_mods.Cos_guess.__get__(
cos_mod0, cos_mod0.__class__)
self.fit_dicts['cos_fit_off'] = {
'model': cos_mod0,
'guess_dict': {'frequency': {'value': 1/360, 'vary': False}},
'fit_xvals': {'t': self.proc_data_dict['xvals_off'][:-4]},
'fit_yvals': {'data': self.proc_data_dict['yvals_osc_off'][:-4]}}
cos_mod1 = lmfit.Model(fit_mods.CosFunc)
cos_mod1.guess = fit_mods.Cos_guess.__get__(
cos_mod1, cos_mod1.__class__)
self.fit_dicts['cos_fit_on'] = {
'model': cos_mod1,
'guess_dict': {'frequency': {'value': 1/360, 'vary': False}},
'fit_xvals': {'t': self.proc_data_dict['xvals_on'][:-3]},
'fit_yvals': {'data': self.proc_data_dict['yvals_osc_on'][:-3]}}
def analyze_fit_results(self):
qoi = self.proc_data_dict['quantities_of_interest']
fr_0 = self.fit_res['cos_fit_off']
fr_1 = self.fit_res['cos_fit_on']
phi0 = ufloat(np.rad2deg(fr_0.params['phase'].value),
np.rad2deg(fr_0.params['phase'].stderr if
fr_0.params['phase'].stderr is not None
else np.nan))
phi1 = ufloat(np.rad2deg(fr_1.params['phase'].value),
np.rad2deg(fr_1.params['phase'].stderr if
fr_1.params['phase'].stderr is not None
else np.nan))
qoi['phi_0'] = phi0
qoi['phi_1'] = phi1
qoi['phi_cond'] = (phi0-phi1) % 360
qoi['osc_amp_0'] = ufloat(fr_0.params['amplitude'].value,
fr_0.params['amplitude'].stderr if
fr_0.params['amplitude'].stderr is not None
else np.nan)
qoi['osc_amp_1'] = ufloat(fr_1.params['amplitude'].value,
fr_1.params['amplitude'].stderr if
fr_1.params['amplitude'].stderr is not None
else np.nan)
qoi['osc_offs_0'] = ufloat(fr_0.params['offset'].value,
fr_0.params['offset'].stderr if
fr_0.params['offset'].stderr is not None
else np.nan)
qoi['osc_offs_1'] = ufloat(fr_1.params['offset'].value,
fr_1.params['offset'].stderr if
fr_1.params['offset'].stderr is not None
else np.nan)
qoi['offs_diff'] = qoi['osc_offs_1'] - qoi['osc_offs_0']
spec_on = ufloat(np.mean(self.proc_data_dict['yvals_spec_on'][:-3]),
sem(self.proc_data_dict['yvals_spec_on'][:-3]))
spec_off = ufloat(np.mean(self.proc_data_dict['yvals_spec_off'][:-3]),
sem(self.proc_data_dict['yvals_spec_off'][:-3]))
qoi['missing_fraction'] = spec_on-spec_off
def prepare_plots(self):
self._prepare_main_oscillation_figure()
self._prepare_spectator_qubit_figure()
def _prepare_main_oscillation_figure(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_osc_off'],
'ylabel': self.proc_data_dict['ylabel_osc'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_osc_on'],
'ylabel': self.proc_data_dict['ylabel_osc'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['cos_fit_off'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_off']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ off',
'do_legend': True}
self.plot_dicts['cos_fit_on'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_on']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ on',
'do_legend': True}
# offset as a guide for the eye
y = self.fit_res['cos_fit_off'].params['offset'].value
self.plot_dicts['cos_off_offset'] = {
'plotfn': self.plot_matplot_ax_method,
'ax_id': 'main',
'func': 'axhline',
'plot_kws': {
'y': y, 'color': 'C0', 'linestyle': 'dotted'}
}
qoi = self.proc_data_dict['quantities_of_interest']
phase_message = (
'Phase diff.: {} deg\n'
'Phase off: {} deg\n'
'Phase on: {} deg\n\n'
'Offs. diff.: {} %\n'
'Osc. offs. off: {} \n'
'Osc. offs. on: {}\n\n'
'Osc. amp. off: {} \n'
'Osc. amp. on: {} '.format(
qoi['phi_cond'],
qoi['phi_0'], qoi['phi_1'],
qoi['offs_diff']*100,
qoi['osc_offs_0'], qoi['osc_offs_1'],
qoi['osc_amp_0'], qoi['osc_amp_1']))
self.plot_dicts['phase_message'] = {
'ax_id': 'main',
'ypos': 0.9,
'xpos': 1.45,
'plotfn': self.plot_text,
'box_props': 'fancy',
'line_kws': {'alpha': 0},
'text_string': phase_message}
def _prepare_spectator_qubit_figure(self):
self.plot_dicts['spectator_qubit'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_spec_off'],
'ylabel': self.proc_data_dict['ylabel_spec'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['spec_on'] = {
'plotfn': self.plot_line,
'ax_id': 'spectator_qubit',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_spec_on'],
'ylabel': self.proc_data_dict['ylabel_spec'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
leak_msg = (
'Missing fraction: {} % '.format(
self.proc_data_dict['quantities_of_interest']
['missing_fraction']*100))
self.plot_dicts['leak_msg'] = {
'ax_id': 'spectator_qubit',
'ypos': 0.7,
'xpos': 1.05,
'plotfn': self.plot_text,
'box_props': 'fancy',
'line_kws': {'alpha': 0},
'horizontalalignment': 'left',
'text_string': leak_msg}
# offset as a guide for the eye
y = self.fit_res['cos_fit_on'].params['offset'].value
self.plot_dicts['cos_on_offset'] = {
'plotfn': self.plot_matplot_ax_method,
'ax_id': 'main',
'func': 'axhline',
'plot_kws': {
'y': y, 'color': 'C1', 'linestyle': 'dotted'}
}
|
python
|
def zk_demo(cfg, V, F, ZMvtk):  # function max_abs_diff = demo( V,F,ZMvtk )
    #
    num_vertices = 3             # num_vertices = 3;
    num_facets = cfg.size(F, 0)  #! # num_facets = size(F,1);
    N = 20                       # N = 20; %code will only work up to order 20 (can be fixed later)
    #
    # % ZERNIKE MOMENTS
    G = cfg.geometric_moments_orig(V, F, N, num_facets, num_vertices)  # G=geometric_moments_orig(V,F,N,num_facets,num_vertices);
    Z = cfg.zernike(G, N)                       # Z=zernike(G,N);
    Descriptors = cfg.feature_extraction(Z, N)  # Descriptors=feature_extraction(Z,N);
    # ZM= reformat_zernike(Z,N);
    #
    # %compare to your code
    # max_abs_diff = max(abs(abs(ZM) - abs(ZMvtk))); %error should be around 1e-8
    return Descriptors
|
python
|
from django.urls import path
from . import api_views, views
urlpatterns = [
path('', views.firework_home, name='fireworks-home'),
path('add/', views.add_firework, name='fireworks-add'),
path('categories', views.category, name='fireworks-categories'),
path('category/<slug:category_slug>/', views.category, name='fireworks-category'),
path('category/<slug:category_slug>/<slug:firework_slug>/', views.firework_detail, name='fireworks-detail'),
path('favorites/<int:pk>/', views.favorites, name='fireworks-favorites'),
path('manufacturer/<slug:manufacturer_slug>/', views.manufacturer, name='fireworks-manufacturer'),
path('manufacturers/', views.manufacturers, name='fireworks-manufacturers'),
]
|
python
|
from pydantic import BaseModel
from starlette.requests import Request
from fastapi.security.base import SecurityBase
from fastapi.security.utils import get_authorization_scheme_param
from typing import Optional
from starlette.status import HTTP_403_FORBIDDEN, HTTP_401_UNAUTHORIZED
from fastapi.exceptions import HTTPException
class AuthFlowsModel(BaseModel):
pass
class AuthFlows(BaseModel):
token: Optional[AuthFlowsModel] = None
class Auth(SecurityBase):
def __init__(
self,
*,
scheme_name: Optional[str] = None,
auto_error: Optional[bool] = True
):
self.model = AuthFlows()
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(self, request: Request) -> Optional[str]:
authorization: str = request.headers.get("Authorization")
if not authorization:
if self.auto_error:
raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail="Not authenticated")
else:
return None
return authorization
class BearerAuth(Auth):
def __init__(
self,
scheme_name: Optional[str] = None,
auto_error: bool = True,
):
super().__init__(scheme_name=scheme_name, auto_error=auto_error)
async def __call__(self, request: Request) -> Optional[str]:
authorization: str = request.headers.get("Authorization")
scheme, param = get_authorization_scheme_param(authorization)
if not authorization or scheme.lower() != "bearer":
if self.auto_error:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
else:
return None
return param
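# Illustrative usage sketch (not part of this module; the app and route below
# are hypothetical): the scheme plugs into FastAPI's dependency injection, so
# route handlers receive the bearer token string extracted above.
#
#   from fastapi import Depends, FastAPI
#
#   app = FastAPI()
#   bearer_auth = BearerAuth()
#
#   @app.get("/me")
#   async def read_me(token: str = Depends(bearer_auth)):
#       return {"token": token}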
|
python
|
print(1)<caret>
|
python
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms.affine.trace import trace
from cvxpy.expressions.variable import Variable
def normNuc_canon(expr, args):
A = args[0]
m, n = A.shape
# Create the equivalent problem:
# minimize (trace(U) + trace(V))/2
# subject to:
# [U A; A.T V] is positive semidefinite
X = Variable((m+n, m+n), PSD=True)
constraints = []
# Fix X using the fact that A must be affine by the DCP rules.
# X[0:rows,rows:rows+cols] == A
constraints.append(X[0:m, m:m+n] == A)
trace_value = 0.5 * trace(X)
return trace_value, constraints
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch
from torchknickknacks import metrics
x1 = torch.rand(100,)
x2 = torch.rand(100,)
r = metrics.pearson_coeff(x1, x2)
x = torch.rand(100, 30)
r_pairs = metrics.pearson_coeff_pairs(x)
|
python
|
# Based off of https://github.com/getninjas/celery-executor/
#
# Apache Software License 2.0
#
# Copyright (c) 2018, Alan Justino da Silva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os import environ as env
from concurrent.futures import as_completed
from concurrent.futures._base import (RUNNING, FINISHED, CANCELLED,
CANCELLED_AND_NOTIFIED)
from threading import Lock, Thread
import time
from logging import NullHandler, StreamHandler
from logging.handlers import SocketHandler
from terra.executor.base import BaseFuture, BaseExecutor
from terra import settings
from terra.logger import getLogger
logger = getLogger(__name__)
class CeleryExecutorFuture(BaseFuture):
def __init__(self, asyncresult):
self._ar = asyncresult
super().__init__()
def __del__(self):
self._ar.forget()
del self._ar
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
logger.debug4(f'Canceling task {self._ar.id}')
with self._condition:
if self._state in [RUNNING, FINISHED, CANCELLED, CANCELLED_AND_NOTIFIED]:
return super().cancel()
# Not running and not cancelled. May be possible to cancel!
self._ar.ready() # Triggers an update check
if self._ar.state != 'REVOKED':
self._ar.revoke()
self._ar.ready()
# Celery task should be REVOKED now. Otherwise may be not possible
# revoke it.
if self._ar.state == 'REVOKED':
result = super().cancel()
if not result: # pragma: no cover
logger.error('Please open an issue on Github: Upstream '
'implementation changed?')
else:
# Is not running nor revoked nor finished :/
# The revoke() had not produced effect: Task is probable not on a
# worker, then not revoke-able.
# Setting as RUNNING to inhibit super() from cancelling the Future,
# then putting back.
initial_state = self._state
self._state = RUNNING
result = super().cancel()
if result: # pragma: no cover
logger.error('Please open an issue on Github: Upstream '
'implementation changed?')
self._state = initial_state
return result
class CeleryExecutor(BaseExecutor):
"""
Executor implementation using celery tasks.
Parameters
----------
predelay
Will trigger before the `.apply_async` internal call
postdelay
      Will trigger after the `.apply_async` internal call
applyasync_kwargs
Options passed to the `.apply_async()` call
retry_kwargs
Options passed to the `.retry()` call on errors
retry_queue
Sugar to set an alternative queue specially for errors
update_delay
Delay time between checks for Future state changes
"""
  # This is only true when using prefork; eventlet, gevent, and solo are
  # single process
multiprocess = True
def __init__(self, predelay=None, postdelay=None, applyasync_kwargs=None,
retry_kwargs=None, retry_queue='', update_delay=0.1,
max_workers=None):
# Options about calling the Task
self._predelay = predelay
self._postdelay = postdelay
self._applyasync_kwargs = applyasync_kwargs or {}
self._retry_kwargs = retry_kwargs or {}
if retry_queue:
self._retry_kwargs['queue'] = retry_queue
self._retry_kwargs.setdefault('max_retries', 1)
self._retry_kwargs.setdefault('max_retries', 0)
# Options about managing this Executor flow
self._update_delay = update_delay
self._shutdown = False
self._shutdown_lock = Lock()
self._futures = set()
self._monitor_started = False
self._monitor_stopping = False
self._monitor = Thread(target=self._update_futures)
self._monitor.daemon = True
def _update_futures(self):
while True:
time.sleep(self._update_delay) # Not-so-busy loop
if self._monitor_stopping:
return
for fut in tuple(self._futures):
if fut._state in (FINISHED, CANCELLED_AND_NOTIFIED):
# This Future is set and done. Nothing else to do.
self._futures.remove(fut)
continue
ar = fut._ar
ar.ready() # Just trigger the AsyncResult state update check
if ar.state == 'REVOKED':
logger.warning('Celery task "%s" cancelled.', ar.id)
if not fut.cancelled():
if not fut.cancel(): # pragma: no cover
logger.error('Future was not running but failed to be cancelled')
fut.set_running_or_notify_cancel()
# Future is CANCELLED -> CANCELLED_AND_NOTIFIED
elif ar.state in ('RUNNING', 'RETRY'):
logger.debug4('Celery task "%s" running.', ar.id)
if not fut.running():
fut.set_running_or_notify_cancel()
# Future is RUNNING
elif ar.state == 'SUCCESS':
logger.debug4('Celery task "%s" resolved.', ar.id)
fut.set_result(ar.get(disable_sync_subtasks=False))
# Future is FINISHED
elif ar.state == 'FAILURE':
logger.error('Celery task "%s" resolved with error.', ar.id)
exc = ar.result
exc = type(exc)(
f'{str(exc)}\n\nThe task stack trace:\n\n{ar.traceback}')
fut.set_exception(exc)
# Future is FINISHED
# else: # ar.state in [RECEIVED, STARTED, REJECTED, RETRY]
# pass
def submit(self, fn, *args, **kwargs):
"""
""" # Original python comment has * and isn't napoleon compatible
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
if not self._monitor_started:
self._monitor.start()
self._monitor_started = True
# metadata = {
# 'retry_kwargs': self._retry_kwargs.copy()
# }
if self._predelay:
self._predelay(fn, *args, **kwargs)
# asyncresult = _celery_call.apply_async((fn, metadata) + args, kwargs,
# **self._applyasync_kwargs)
asyncresult = fn.apply_async(args, kwargs)
if self._postdelay:
self._postdelay(asyncresult)
future = CeleryExecutorFuture(asyncresult)
self._futures.add(future)
return future
def shutdown(self, wait=True):
logger.debug1('Shutting down celery tasks...')
with self._shutdown_lock:
self._shutdown = True
for fut in tuple(self._futures):
fut.cancel()
if wait:
for _ in as_completed(self._futures):
pass
self._monitor_stopping = True
try:
self._monitor.join()
except RuntimeError: # pragma: no cover
# Thread never started. Cannot join
pass
@staticmethod
def configuration_map(service_info):
from terra.compute import compute
service_name = env['TERRA_CELERY_SERVICE']
class ServiceClone:
def __init__(self, service_info):
self.compose_service_name = service_name
if hasattr(service_info, 'justfile'):
self.justfile = service_info.justfile
if hasattr(service_info, 'compose_files'):
self.compose_files = service_info.compose_files
self.env = env # .copy()
self.volumes = []
service_clone = ServiceClone(service_info)
if hasattr(compute, 'config'):
config = compute.config(service_clone)
else:
config = None
volume_map = compute.get_volume_map(config, service_clone)
return volume_map
@staticmethod
def configure_logger(sender, **kwargs):
if settings.terra.zone == 'task': # pragma: no cover
# This won't ever be reached because the task_controller will configure
# the logger, and then fork.
sender.main_log_handler = NullHandler()
elif settings.terra.zone == 'task_controller':
# Setup log file for use in configure
if settings.logging.log_file:
os.makedirs(os.path.dirname(settings.logging.log_file), exist_ok=True)
sender._log_file = settings.logging.log_file
else:
sender._log_file = os.devnull
os.makedirs(settings.processing_dir, exist_ok=True)
sender._log_file = open(sender._log_file, 'a')
sender.main_log_handler = StreamHandler(stream=sender._log_file)
sender.root_logger.addHandler(sender.main_log_handler)
@staticmethod
def reconfigure_logger(sender, pre_run_task=False,
post_settings_context=False, **kwargs):
if settings.terra.zone == 'task':
if pre_run_task:
if sender.main_log_handler:
sender.main_log_handler.close()
try:
sender.root_logger.removeHandler(sender.main_log_handler)
except ValueError:
pass
sender.main_log_handler = SocketHandler(
settings.logging.server.hostname,
settings.logging.server.port)
sender.root_logger.addHandler(sender.main_log_handler)
if post_settings_context:
# when the celery task is done, its logger is automatically
# reconfigured; use that opportunity to close the stream
if sender.main_log_handler:
sender.main_log_handler.close()
try:
sender.root_logger.removeHandler(sender.main_log_handler)
except ValueError:
pass
sender.main_log_handler = NullHandler()
sender.root_logger.addHandler(sender.main_log_handler)
elif settings.terra.zone == 'task_controller':
if settings.logging.log_file:
os.makedirs(os.path.dirname(settings.logging.log_file), exist_ok=True)
log_file = settings.logging.log_file
else:
log_file = os.devnull
if log_file != sender._log_file.name:
os.makedirs(settings.processing_dir, exist_ok=True)
sender._log_file.close()
sender._log_file = open(log_file, 'a')
|
python
|
import os
import sys
import json
from pathlib import Path
import shutil
import argparse
from wikiutils import split_wiki, sample_docs, cleanup_paths, save_stats
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--lang", required=True, help="language of the wiki-dump to download / preprocess", type=str)
parser.add_argument("-n", "--number_docs", default=160000, help="number of documents to randomly sample into docs_lm", type=int)
parser.add_argument("-m", "--min_doc_length", default=1800, help="minimum document length (characters) of sampled documents", type=int)
parser.add_argument("--mirror", default='dumps.wikimedia.org', help="wikipedia mirror - default dumps.wikimedia.org", type=str)
parser.add_argument('--cleanup', default=False, action='store_true')
args = parser.parse_args()
lang = args.lang
min_doc_length = args.min_doc_length
number_docs = args.number_docs
mirror = args.mirror
cleanup = args.cleanup
name = f'{lang}wiki'
xml_name = f'{name}-latest-pages-articles.xml'
zip_name = f'{xml_name}.bz2'
path_data = Path('/data')
path_wiki = path_data/name
path_docs = path_data/name/'docs'/'all'
path_dump = path_data/name/'dump'
path_extract = path_dump/'extract'
path_lm = path_data/name/'docs'/'sampled'
# CLEANUP - if --cleanup is set remove directorys and EXIT!
if cleanup:
paths = [path_docs, path_extract]
cleanup_paths(paths)
sys.exit(f'Exiting after clean up!')
if not path_wiki.exists():
os.mkdir(path_wiki)
os.mkdir(path_wiki/'docs')
if not path_data.exists():
os.mkdir(path_data)
else:
print(f'{path_data} already exists')
# DOWNLOAD wikipedia dump
if not (path_dump/xml_name).exists():
if not path_dump.exists():
print(f'creating {path_dump}')
os.mkdir(path_dump)
# xml does not exist -> download?
if not (path_dump/zip_name).exists():
print(f'downloading {zip_name}')
# zip does not exist -> download!
os.system(f'wget --no-check-certificate -P {path_dump.absolute()}/ https://{mirror}/{name}/latest/{name}-latest-pages-articles.xml.bz2')
print(f'unpacking {zip_name}')
os.system(f'bzip2 -d {path_dump}/{zip_name}')
# EXTRACT with wikiextractor
if not path_extract.exists():
print(f'creating {path_extract}')
print('running wikiextractor')
os.system(f'wikiextractor --no-templates -b 100G -q -o /data/{name}/dump/extract/ /data/{name}/dump/{name}-latest-pages-articles.xml')
else:
print(f'{path_extract} exists - not extracting')
# SPLIT wiki
print('splitting wiki')
if not path_docs.exists():
print(f'creating {path_docs}')
split_wiki(path_extract, path_docs, lang)
else:
print(f'path {path_docs} exists - not splitting')
# SAMPLE n-docs
n_words = 0
n_docs = 0
print(f'sampling {number_docs} docs')
if not path_lm.exists():
print(f'creating {path_lm}')
os.mkdir(path_lm)
n_words, n_docs = sample_docs(path_docs, path_lm, min_doc_length, number_docs)
else:
print(f'{path_lm} exists - skipping sampling documents!')
save_stats(path_wiki/'docs', n_docs, n_words, min_doc_length)
print(f'successfully prepared {name} - {path_lm}, number of docs {n_docs}/{number_docs} with {n_words} words / tokens!')
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import uuidfield.fields
class Migration(migrations.Migration):
dependencies = [
('schedule', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='EventLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('action', models.CharField(default=b'log', max_length=b'100', choices=[(b'acknowledge', b'acknowledge'), (b'resolve', b'resolve'), (b'silence_service', b'silence service'), (b'silence_incident', b'silence incident'), (b'forward', b'forward'), (b'log', b'log'), (b'notified', b'notified'), (b'notification_failed', b'notification failed'), (b'trigger', b'trigger')])),
('data', models.TextField()),
('occurred_at', models.DateTimeField()),
],
options={
'verbose_name': 'eventlog',
'verbose_name_plural': 'eventlog',
},
),
migrations.CreateModel(
name='Incident',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('incident_key', models.CharField(max_length=200)),
('event_type', models.CharField(max_length=15)),
('description', models.CharField(max_length=200)),
('details', models.TextField()),
('occurred_at', models.DateTimeField()),
],
options={
'verbose_name': 'incidents',
'verbose_name_plural': 'incidents',
},
),
migrations.CreateModel(
name='IncidentSilenced',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('silenced', models.BooleanField(default=False)),
('silenced_until', models.DateTimeField()),
('incident', models.ForeignKey(to='openduty.Incident')),
],
),
migrations.CreateModel(
name='SchedulePolicy',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=80)),
('repeat_times', models.IntegerField()),
],
options={
'verbose_name': 'schedule_policy',
'verbose_name_plural': 'schedule_policies',
},
),
migrations.CreateModel(
name='SchedulePolicyRule',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('position', models.IntegerField()),
('escalate_after', models.IntegerField()),
('schedule', models.ForeignKey(blank=True, to='schedule.Calendar', null=True)),
('schedule_policy', models.ForeignKey(related_name='rules', to='openduty.SchedulePolicy')),
('user_id', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'verbose_name': 'schedule_policy_rule',
'verbose_name_plural': 'schedule_policy_rules',
},
),
migrations.CreateModel(
name='Service',
fields=[
('name', models.CharField(unique=True, max_length=80)),
('id', uuidfield.fields.UUIDField(primary_key=True, serialize=False, editable=False, max_length=32, blank=True, unique=True)),
('retry', models.IntegerField(null=True, blank=True)),
('escalate_after', models.IntegerField(null=True, blank=True)),
('notifications_disabled', models.BooleanField(default=False)),
('policy', models.ForeignKey(blank=True, to='openduty.SchedulePolicy', null=True)),
],
options={
'verbose_name': 'service',
'verbose_name_plural': 'service',
},
),
migrations.CreateModel(
name='ServiceSilenced',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('silenced', models.BooleanField(default=False)),
('silenced_until', models.DateTimeField()),
('service', models.ForeignKey(to='openduty.Service')),
],
),
migrations.CreateModel(
name='ServiceTokens',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=80)),
('service_id', models.ForeignKey(to='openduty.Service')),
],
options={
'verbose_name': 'service_tokens',
'verbose_name_plural': 'service_tokens',
},
),
migrations.CreateModel(
name='Token',
fields=[
('key', models.CharField(max_length=40, serialize=False, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('phone_number', models.CharField(max_length=50)),
('pushover_user_key', models.CharField(max_length=50)),
('pushover_app_key', models.CharField(max_length=50)),
('slack_room_name', models.CharField(max_length=50)),
('prowl_api_key', models.CharField(max_length=50, blank=True)),
('prowl_application', models.CharField(max_length=256, blank=True)),
('prowl_url', models.CharField(max_length=512, blank=True)),
('user', models.OneToOneField(related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='servicetokens',
name='token_id',
field=models.ForeignKey(to='openduty.Token'),
),
migrations.AddField(
model_name='incident',
name='service_key',
field=models.ForeignKey(to='openduty.Service'),
),
migrations.AddField(
model_name='eventlog',
name='incident_key',
field=models.ForeignKey(to='openduty.Incident', blank=True),
),
migrations.AddField(
model_name='eventlog',
name='service_key',
field=models.ForeignKey(to='openduty.Service'),
),
migrations.AddField(
model_name='eventlog',
name='user',
field=models.ForeignKey(related_name='users', default=None, blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AlterUniqueTogether(
name='incident',
unique_together=set([('service_key', 'incident_key')]),
),
]
|
python
|
from .base import Dashboard
|
python
|
from django import forms
from django.contrib import messages
class InputForm(forms.Form):
query = forms.CharField(max_length=200)
|
python
|
#!/usr/bin/python
import base64
import binascii
# CONSTANTS
WORDLISTFILE = r'OSPD4.txt'
SET1CH4INPUTFILE = r'4.txt'
SET1CH6INPUTFILE = r'6.txt'
# Functions
def hextobase64(hexstring):
bin = binascii.unhexlify(hexstring)
return base64.b64encode(bin).decode()
def xor(binary1, binary2):
return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(binary1, binary2))
def xor_key(data, key):
l = len(key)
buff = []
for idx, val in enumerate(data):
buff.append(chr(ord(val) ^ ord(key[idx % l])))
return ''.join(buff)
def readwordlist(filepath):
words = []
f = open(filepath, 'r')
for line in f:
words.append(str.split(line, ' ')[0].lower())
if f:
f.close()
return words
def scoresentence(sentence, words):
score = 0
for word in sentence.split(' '):
for w in words:
if w.lower() == word.lower():
score += 1
break
return score
def findsinglebytexorcipher(hexstring, words):
highscore = {}
for i in range(ord('A'), ord('Z') + 1):
result = ''
for j in range(0, len(hexstring), 2):
result += xor_key(binascii.unhexlify(hexstring[j:j+2]), chr(i))
score = scoresentence(result, words)
highscore[i] = (score, result, chr(i))
result = []
for score in highscore.items():
if score[1][0] > 0:
result.append(score)
return result
def detectsinglecharacterxor(words):
result = []
f = open(SET1CH4INPUTFILE, 'r')
i = 1
for line in f:
print i ,
i += 1
r = findsinglebytexorcipher(line.replace('\n', ''), words)
if len(r) > 0:
result.append(r)
for res in r:
print 'FOUND!'
print 'Key: %s' % res[1][2]
print 'Result: %s' % res[1][1]
if f:
f.close()
return result
def hammingdistance(str1, str2):
assert len(str1) == len(str2)
return sum(ch1 != ch2 for ch1, ch2 in zip(bin(int(binascii.hexlify(str1), 16)), bin(int(binascii.hexlify(str2), 16))))
def b64filetobinary(filepath):
f = open(SET1CH6INPUTFILE, 'r')
l = ''
for line in f:
l += line.replace('\n', '')
if f:
f.close()
return base64.b64decode(l)
def tryguesskeysize(data):
for keysize in range(2, 40):
#while len(data) > index + keysize * 2:
distance = 0
for index in range(0, keysize * 4, keysize):
distance += hammingdistance(data[index:index+keysize], data[index+keysize:index+keysize*2])
print 'Key: %d, distance: %d' % (keysize, distance / keysize)
def breakrepeatingkeyxor(data, keysize):
    # TODO: solution for challenge 6 is not implemented yet; keep this a no-op
    # so the call below runs without raising a SyntaxError.
    pass
# Main
if __name__ == '__main__':
print '''
Set 1: Challenge 1:
Convert hex to base 64
'''
inputset1ch1 = r'49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d'
expectedresultset1ch1 = r'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t'
assert expectedresultset1ch1 == hextobase64(inputset1ch1)
print 'Result: OK'
print '''
Set 1: Challenge 2:
Fixed XOR
'''
input1set1ch2 = r'1c0111001f010100061a024b53535009181c'
input2set1ch2 = r'686974207468652062756c6c277320657965'
expectedresultset1ch2 = r'746865206b696420646f6e277420706c6179'
assert expectedresultset1ch2 == binascii.hexlify(xor(binascii.unhexlify(input1set1ch2), binascii.unhexlify(input2set1ch2)))
print 'Result: OK'
print '''
Set 1: Challenge 3:
Single-byte XOR cipher
'''
words = readwordlist(WORDLISTFILE)
#inputset1ch3 = r'1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736'
#for res in findsinglebytexorcipher(inputset1ch3, words):
# print 'Key: %s' % res[1][2]
# print 'Result: %s' % res[1][1]
print '''
Set 1: Challenge 4:
Detect single-character XOR
'''
#for res in detectsinglecharacterxor(words):
# print 'Key: %s' % res[1][2]
# print 'Result: %s' % res[1][1]
print '''
Set 1: Challenge 5:
Implement repeating-key XOR
'''
input1set1ch5 = r'''Burning 'em, if you ain't quick and nimble I go crazy when I hear a cymbal'''
input2set1ch5 = r'ICE'
expectedresultset1ch5 = r'0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c692b20283165286326302e27282f'
#print binascii.hexlify(xor_key(input1set1ch5, input2set1ch5))
#print expectedresultset1ch5
print '''
Set 1: Challenge 6:
Break repeating-key XOR
'''
input1set1ch6 = r'this is a test'
input2set1ch6 = r'wokka wokka!!!'
assert 37 == hammingdistance(input1set1ch6, input2set1ch6)
data = b64filetobinary(SET1CH6INPUTFILE)
#tryguesskeysize(data)
#breakrepeatingkeyxor(data, 3)
breakrepeatingkeyxor(data, 5)
#breakrepeatingkeyxor(data, 8)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import unittest
from mock import mock
from conans.client.tools.oss import detected_architecture
class DetectedArchitectureTest(unittest.TestCase):
def test_various(self):
# x86
with mock.patch("platform.machine", mock.MagicMock(return_value='i386')):
self.assertEqual('x86', detected_architecture())
with mock.patch("platform.machine", mock.MagicMock(return_value='i686')):
self.assertEqual('x86', detected_architecture())
with mock.patch("platform.machine", mock.MagicMock(return_value='x86_64')):
self.assertEqual('x86_64', detected_architecture())
with mock.patch("platform.machine", mock.MagicMock(return_value='amd64')):
self.assertEqual('x86_64', detected_architecture())
# ARM
with mock.patch("platform.machine", mock.MagicMock(return_value='aarch64_be')):
self.assertEqual('armv8', detected_architecture())
with mock.patch("platform.machine", mock.MagicMock(return_value='armv8b')):
self.assertEqual('armv8', detected_architecture())
with mock.patch("platform.machine", mock.MagicMock(return_value='armv7l')):
self.assertEqual('armv7', detected_architecture())
with mock.patch("platform.machine", mock.MagicMock(return_value='armv6l')):
self.assertEqual('armv6', detected_architecture())
with mock.patch("platform.machine", mock.MagicMock(return_value='arm')):
self.assertEqual('armv6', detected_architecture())
# PowerPC
with mock.patch("platform.machine", mock.MagicMock(return_value='ppc64le')):
self.assertEqual('ppc64le', detected_architecture())
with mock.patch("platform.machine", mock.MagicMock(return_value='ppc64')):
self.assertEqual('ppc64', detected_architecture())
# MIPS
with mock.patch("platform.machine", mock.MagicMock(return_value='mips')):
self.assertEqual('mips', detected_architecture())
with mock.patch("platform.machine", mock.MagicMock(return_value='mips64')):
self.assertEqual('mips64', detected_architecture())
# SPARC
with mock.patch("platform.machine", mock.MagicMock(return_value='sparc')):
self.assertEqual('sparc', detected_architecture())
with mock.patch("platform.machine", mock.MagicMock(return_value='sparc64')):
self.assertEqual('sparcv9', detected_architecture())
|
python
|
import pytest
import shutil
from accident_prediction_montreal.weather import get_weather_station_id_df
from accident_prediction_montreal.utils import init_spark
@pytest.fixture(scope='session')
def spark():
return init_spark()
def test_get_weather_station_id_df(spark):
acc_sample = spark.read.parquet('tests/data/preprocessed_accidents_sample.parquet')
acc_sample = acc_sample.limit(200)
get_weather_station_id_df(spark, acc_sample, cache_file='/tmp/test.parquet')
shutil.rmtree('/tmp/test.parquet')
|
python
|
# -*- coding: utf-8 -*-
import sys
import argparse
from kits.parse_articles import parse_articles
from config.conf import conf
from config.conf import enviroment
from kits.log import get_logger
def initialize():
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--env", action="store", dest="env",
help="enviroment of server. prod|test|dev")
# parser.add_argument("-p", "--port", action="store", dest="port", default=6003,
# help="port of running iplive node")
args = parser.parse_args()
if args.env not in ["dev", "prod"]:
print("enviroment not support")
sys.exit()
env = enviroment[args.env]
conf['env'] = env
if 'logger' not in conf:
conf['logger'] = get_logger("blog", on_screen=True, level=env['level'])
parse_articles()
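# Assumed entry point (an assumption for illustration; the original file does
# not show how initialize() is invoked): parse CLI arguments and build the blog
# when this module is run directly.
if __name__ == "__main__":
    initialize()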
|
python
|
#!/usr/bin/env python
# encoding: utf-8
# PYTHON_ARGCOMPLETE_OK
# from __future__ imports must occur at the beginning of the file
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from .bypy import main
if __name__ == "__main__":
main()
# vim: tabstop=4 noexpandtab shiftwidth=4 softtabstop=4 ff=unix fileencoding=utf-8
|
python
|
"""
Define a new function using the @qgsfunction decorator.
The function accepts the following parameters
:param [any]: Define any parameters you want to pass to your function before
the following arguments.
:param feature: The current feature
:param parent: The QgsExpression object
:param context: If there is an argument called ``context`` found at the last
position, this variable will contain a ``QgsExpressionContext``
object, that gives access to various additional information like
expression variables. E.g. ``context.variable('layer_id')``
:returns: The result of the expression.
The @qgsfunction decorator accepts the following arguments:
:param args: Defines the number of arguments. With ``args='auto'`` the number
    of arguments will automatically be extracted from the signature.
:param group: The name of the group under which this expression function will
be listed.
:param usesgeometry: Set this to False if your function does not access
feature.geometry(). Defaults to True.
:param referenced_columns: An array of attribute names that are required to run
this function. Defaults to
[QgsFeatureRequest.ALL_ATTRIBUTES].
"""
from qgis.core import *
from qgis.gui import *
import hashlib
import os
@qgsfunction(args='auto', group='Custom')
def my_md5sum(value1, feature, parent, context):
"""
    Calculates an MD5 checksum of the file referenced by value1, where value1
    is a path relative to the current project home. Note: the hashing is not
    implemented yet; the function currently returns a placeholder string.
    <h2>Example usage:</h2>
    <ul>
    <li>my_md5sum('photos/site_01.jpg') -> MD5 hex digest of that file</li>
</ul>
"""
print(value1)
print(context.variable('project_home'))
project_home = context.variable('project_home')
relative_filename = value1
fullpath = os.path.join(project_home, relative_filename)
print(fullpath)
filehash = hashlib.md5()
return 'Hallo Stefan'
|
python
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from datetime import datetime
class CommonRealDateUtils:
"""A utility for managing real life date and time.
.. note:: This utility is used to handle the Real Time and not the Game Time.
"""
@staticmethod
def get_current_date_time() -> datetime:
"""get_current_date_time()
Retrieve the current date and time.
:return: The current real date and time.
:rtype: datetime
"""
return datetime.now()
@staticmethod
def get_current_date_string() -> str:
"""get_current_date_string()
Retrieve the current date as a pre-formatted string.
:return: The string representation of the current real date.
:rtype: str
"""
return str(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
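# Illustrative usage (the timestamp shown is only an example of the format
# produced by the strftime pattern above, not a real run):
#
#   CommonRealDateUtils.get_current_date_string()
#   # -> '2022-03-08 19:53:21.000000'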
|
python
|
import logging
from .utils import get_logging_level
class Client(object):
""" This object autheticates the account to the api """
def __init__(self, auth, **kwargs):
"""
Initializes Client object.
Args:
auth (tuple): Authentication object
                api (str): API endpoint path
"""
self.auth = auth
self.protocol = kwargs.get('protocol', 'https')
self.domain = kwargs.get('domain', 'api.sumologic.com')
self.api = kwargs.get('api', '/api/v1')
api_path = '%s' % self.api
self.url = '%s://%s%s' % (self.protocol, self.domain, api_path)
# setup debug logging
self._debug_mode = kwargs.get('debug', False)
self.log = logging.getLogger(__name__)
self.log.addHandler(logging.StreamHandler())
self.log.setLevel(get_logging_level(self._debug_mode))
def get_url(self):
""" Returns full api url """
return self.url
def get_auth(self):
""" Returns api auth details """
return self.auth
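# Illustrative usage sketch (the credentials and domain below are hypothetical):
#
#   client = Client(('ACCESS_ID', 'ACCESS_KEY'), domain='api.eu.sumologic.com', debug=True)
#   client.get_url()   # -> 'https://api.eu.sumologic.com/api/v1'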
|
python
|
from django.shortcuts import render
from .models import Salas,Rentals
from django.db.models import Q
from django.shortcuts import get_object_or_404
from .forms import RentalForm
def salas_home_view(request,*args,**kargs):
hora_inicial = request.GET.get('inicio',None)
hora_fim = request.GET.get('fim',None)
salas = Salas.objects.all()
salas_ocupadas = []
    get_flag = 1
if (hora_inicial and hora_fim):
query_set = Rentals.objects.filter((Q(data_inicio__gte=hora_inicial) & Q(data_inicio__lt=hora_fim))|(Q(data_final__gt=hora_inicial) & Q(data_final__lte=hora_fim)))
        get_flag = 0
for instance in query_set:
salas_ocupadas.append(instance.sala.nome)
return render(request,"salas_home_page.html",{'salas_query':salas,
'salas_ocupadas_lista':salas_ocupadas,
'get_flag': get_flag})
def salas_rental_view(request, sala_slug):  # TODO: validation still missing
sala = get_object_or_404(Salas, nome=sala_slug.upper())
my_form = RentalForm()
dados_form = {}
if request.method == 'POST':
my_form = RentalForm(request.POST)
if my_form.is_valid():
dados_form = my_form.cleaned_data
dados_form['sala'] = sala
dados_form['user_rental'] = request.user.username
# date=dados['date']
# time = dados['time']
# datetime = date + time
# novo = Rentals(date=datetime)
Rentals.objects.create(**dados_form)
else:
print(my_form.errors)
return render(request,"rentals.html",{'form':my_form,
'sala':sala})
|
python
|
from typing import MutableSequence
class WrappingList(MutableSequence):
    def __init__(self, l=None):
        # Use None as the default to avoid sharing one mutable list between instances.
        if l is None:
            l = []
        if type(l) is not list:
            raise ValueError()
        self._inner_list = l
def __len__(self):
return len(self._inner_list)
def __delitem__(self, index):
index = self.__validindex(index)
self._inner_list.__delitem__(index)
def insert(self, index, value):
index = self.__validindex(index)
self._inner_list.insert(index, value)
def __setitem__(self, index, value):
index = self.__validindex(index)
self._inner_list.__setitem__(index, value)
def __getitem__(self, index):
index = self.__validindex(index)
return self._inner_list.__getitem__(index)
def __validindex(self, index):
return index % len(self._inner_list)
lst = [0,2,3,41,2]
wlst = WrappingList(lst)
for i in range(-10, 10):
print(wlst[i])
|
python
|
import numpy as np
from scipy.special import erf
class GELU:
def __call__(self, x: np.ndarray, approximate: bool = True) -> np.ndarray:
if approximate:
return x * 0.5 * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
return x * 0.5 * (1.0 + erf(x / np.sqrt(2.0)))
def gradient(self, x: np.ndarray, approximate: bool = True) -> np.ndarray:
if approximate:
return 0.5 * np.tanh(0.0356774 * np.power(x, 3) + 0.797885 * x) + (0.0535161 * np.power(x, 3) + 0.398942 * x) * np.power(1 / np.cosh(x), 2) * (0.0356774 * np.power(x, 3) + 0.797885 * x) + 0.5
return 0.5 * (1.0 + erf(x / np.sqrt(2.0))) + x * 1 / np.sqrt(2 * np.pi) * np.exp(-0.5 * np.power(x, 2))
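# Minimal usage sketch (illustrative only): evaluate the activation and its
# gradient on a small batch of values.
if __name__ == "__main__":
    gelu = GELU()
    x = np.linspace(-3.0, 3.0, num=7)
    print(gelu(x))                               # forward pass, tanh approximation (default)
    print(gelu.gradient(x, approximate=False))   # exact gradient via erf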
|
python
|
import sys
import os
import pty
import subprocess
import shlex
import selectors
import multiprocessing as mp
def run(cmd, input_lines=[]):
"""Run an interactive given command with input and capture all IO.
Parameters
----------
cmd: str
The command to run
input_lines: list
List of str with each line of text to be entered into the running cmd
"""
q = mp.Queue()
try:
(child_pid, fd) = pty.fork()
except OSError as e:
print(str(e))
if child_pid == 0:
sys.stdout.flush()
try:
subprocess.run(shlex.split(cmd))
except subprocess.SubprocessError:
print("Couldn't spawn subprocess...")
f = open(os.devnull, "w")
sys.stdout = f
q.put("FINISHED")
return
else:
data = b""
sel = selectors.DefaultSelector()
sel.register(fd, selectors.EVENT_READ)
line_pos = 0
while True:
# Block until new event
sel.select()
# Read all available data
while True:
events = sel.select(-1)
# No more events, stop reading
if len(events) == 0:
break
read_data = os.read(fd, 1024)
if read_data:
data += read_data
# Write if we have something to write
# otherwise skip to final read only mode
if line_pos <= len(input_lines) - 1:
os.write(fd, (input_lines[line_pos] + "\n").encode("utf-8"))
line_pos += 1
else:
break
# Wait for child process to signal end
q.get(block=True)
while True:
events = sel.select(-1)
# No more events, stop reading
if len(events) == 0:
break
read_data = os.read(fd, 1024)
if read_data:
data += read_data
return data.decode("utf-8")
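# Illustrative usage sketch (the command and inputs below are hypothetical):
if __name__ == "__main__":
    # Drive an interactive Python session through the pty and capture everything it printed.
    captured = run("python3 -q", input_lines=["print(2 + 2)", "exit()"])
    print(captured)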
|
python
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 3 14:52:19 2017
@author: Falaize
"""
from __future__ import absolute_import
import sys
from PyQt5.QtWidgets import QApplication
from widgets import PyphsGui
import os
path = os.path.join('pyphs_gui', 'tests', 'rlc.net')
os.chdir('..')
def runner(path=None):
app = QApplication(sys.argv)
sys.argv
e = PyphsGui(path)
e.show()
e.raise_()
sys.exit(app.exec_())
return True
if __name__ == '__main__':
# app = QApplication(sys.argv)
e = PyphsGui(path)
e.show()
e.raise_()
# sys.exit(app.exec_())
|
python
|
# Copyright 2014 Baidu, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provides models for BES-SDK.
"""
class Billing(object):
"""
Billing Class
"""
def __init__(self, payment_type, time):
self.paymentType = payment_type
self.time = time
class Module(object):
"""
Module Class
"""
def __init__(self, type=None, instance_num=None,
version=None, slot_type=None, desire_instance_num=None):
if type is not None:
self.type = type
if instance_num is not None:
self.instanceNum = instance_num
if version is not None:
self.version = version
if slot_type is not None:
self.slotType = slot_type
if desire_instance_num is not None:
self.desireInstanceNum = desire_instance_num
|
python
|
#*******************************************************************************
#
# Filename : EDMNaming.py
# Description : Getting the reduced name of a dataset and others
# Author : Yi-Mu "Enoch" Chen [ [email protected] ]
#
#*******************************************************************************
|
python
|
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import bson
from st2common.models.db.executionstate import ActionExecutionStateDB
from st2common.persistence.executionstate import ActionExecutionState
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2tests import DbTestCase
class ActionExecutionStateTests(DbTestCase):
def test_state_crud(self):
saved = ActionExecutionStateTests._create_save_actionstate()
retrieved = ActionExecutionState.get_by_id(saved.id)
self.assertDictEqual(saved.query_context, retrieved.query_context)
self.assertEqual(saved.query_module, retrieved.query_module)
ActionExecutionStateTests._delete(model_objects=[retrieved])
try:
retrieved = ActionExecutionState.get_by_id(saved.id)
except StackStormDBObjectNotFoundError:
retrieved = None
self.assertIsNone(retrieved, 'managed to retrieve after failure.')
@staticmethod
def _create_save_actionstate():
created = ActionExecutionStateDB()
created.query_context = {'id': 'some_external_service_id'}
created.query_module = 'dummy.modules.query1'
created.execution_id = bson.ObjectId()
return ActionExecutionState.add_or_update(created)
@staticmethod
def _delete(model_objects=[]):
for model_object in model_objects:
model_object.delete()
|
python
|
# -*- coding: utf-8 -*-
from rest_framework import viewsets, mixins
from .models import Pet, Birth, Breed
from .serializers import PetSerializer, BreedSerializer, BirthSerializer
class PetViewSet(mixins.CreateModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
queryset = Pet.objects
serializer_class = PetSerializer
class BirthViewSet(mixins.ListModelMixin,
viewsets.GenericViewSet):
queryset = Birth.objects
serializer_class = BirthSerializer
class BreedViewSet(mixins.ListModelMixin,
viewsets.GenericViewSet):
queryset = Breed.objects
serializer_class = BreedSerializer
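# Illustrative router wiring (module placement and URL prefixes are hypothetical):
#
#   from rest_framework.routers import DefaultRouter
#
#   router = DefaultRouter()
#   router.register(r'pets', PetViewSet)
#   router.register(r'births', BirthViewSet)
#   router.register(r'breeds', BreedViewSet)
#   urlpatterns = router.urls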
|
python
|
""" Tests Input.__init__() and classes in input_classes.py
Note that this is more of a regression test than a unit test.
Compares with previously computed results."""
import os
import numpy as np
import pytest
import astropy.units as u
from astropy.time import Time
from nexoclom.solarsystem import SSObject
from nexoclom import Input, __file__ as basefile
basepath = os.path.dirname(basefile)
if __name__ == '__main__':
inputpath = os.path.join('test_data', 'inputfiles')
else:
inputpath = os.path.join(basepath, 'modelcode', 'tests', 'test_data', 'inputfiles')
@pytest.mark.modelcode
def test_geometry():
inputfile01 = os.path.join(inputpath, 'Geometry.01.input')
geometry01 = Input(inputfile01).geometry
result = {'planet': SSObject('Jupiter'),
'startpoint': 'Io',
'objects': {SSObject('Jupiter'), SSObject('Io'), SSObject('Europa')},
'type': 'geometry without starttime',
'phi': (1*u.rad, 2*u.rad),
'subsolarpoint': (3.14*u.rad, 0*u.rad),
'taa': 1.57*u.rad}
assert geometry01.__dict__ == result
inputfile02 = os.path.join(inputpath, 'Geometry.02.input')
geometry02 = Input(inputfile02).geometry
result = {'planet': SSObject('Jupiter'),
'startpoint': 'Io',
'objects': {SSObject('Jupiter'), SSObject('Io')},
'type': 'geometry with starttime',
'time': Time('2022-03-08T19:53:21')}
assert geometry02.__dict__ == result
inputfile03 = os.path.join(inputpath, 'Geometry.03.input')
geometry03 = Input(inputfile03).geometry
result = {'planet':SSObject('Mercury'),
'startpoint': 'Mercury',
'objects': {SSObject('Mercury')},
'type': 'geometry without starttime',
'subsolarpoint': (0 * u.rad, 0 * u.rad),
'phi': None,
'taa': 3.14 * u.rad}
assert geometry03.__dict__ == result
assert geometry01 == geometry01
assert geometry01 != geometry02
assert geometry01 != geometry03
@pytest.mark.modelcode
def test_SurfaceInteraction():
# sticktype = 'constant'
inputfile01 = os.path.join(inputpath, 'SurfaceInteraction.01.input')
interaction01 = Input(inputfile01).surfaceinteraction
result = {'sticktype': 'constant',
'stickcoef': 1.,
'accomfactor': None}
assert interaction01.__dict__ == result
inputfile02 = os.path.join(inputpath, 'SurfaceInteraction.02.input')
interaction02 = Input(inputfile02).surfaceinteraction
result = {'sticktype': 'constant',
'stickcoef': 0.5,
'accomfactor': 0.2}
assert interaction02.__dict__ == result
assert interaction01 == interaction01
assert interaction01 != interaction02
# sticktype = 'temperature dependent
inputfile03 = os.path.join(inputpath, 'SurfaceInteraction.03.input')
interaction03 = Input(inputfile03).surfaceinteraction
result = {'sticktype': 'temperature dependent',
'accomfactor': 0.2,
'A': (1.57014, -0.006262, 0.1614157)}
assert interaction03.__dict__ == result
inputfile04 = os.path.join(inputpath, 'SurfaceInteraction.04.input')
interaction04 = Input(inputfile04).surfaceinteraction
result = {'sticktype':'temperature dependent',
'accomfactor':0.5,
'A': (1., 0.001, 0.2)}
assert interaction04.__dict__ == result
inputfile05 = os.path.join(inputpath, 'SurfaceInteraction.05.input')
interaction05 = Input(inputfile05).surfaceinteraction
result = {'sticktype':'surface map',
'stick_mapfile': 'default',
'coordinate_system': 'solar-fixed',
'subsolarlon': None,
'accomfactor':0.5}
assert interaction05.__dict__ == result
inputfile06 = os.path.join(inputpath, 'SurfaceInteraction.06.input')
interaction06 = Input(inputfile06).surfaceinteraction
result = {'sticktype':'surface map',
'stick_mapfile': 'Orbit3576.Ca.pkl',
'coordinate_system': 'solar-fixed',
'subsolarlon': None,
'accomfactor':0.5}
assert interaction06.__dict__ == result
@pytest.mark.modelcode
def test_Forces():
inputfile01 = os.path.join(inputpath, 'Forces.01.input')
forces01 = Input(inputfile01).forces
result = {'gravity': True,
'radpres': True}
assert forces01.__dict__ == result
inputfile02 = os.path.join(inputpath, 'Forces.02.input')
forces02 = Input(inputfile02).forces
result = {'gravity': False,
'radpres': True}
assert forces02.__dict__ == result
inputfile03 = os.path.join(inputpath, 'Forces.03.input')
forces03 = Input(inputfile03).forces
result = {'gravity': True,
'radpres': False}
assert forces03.__dict__ == result
@pytest.mark.modelcode
def test_SpatialDist():
inputfile01 = os.path.join(inputpath, 'Spatial.01.input')
spatial01 = Input(inputfile01).spatialdist
result = {'type': 'uniform',
'longitude': (0*u.rad, 2*np.pi*u.rad),
'latitude': (-np.pi/2*u.rad, np.pi/2*u.rad),
'exobase': 1.}
assert spatial01.__dict__ == pytest.approx(result)
inputfile02 = os.path.join(inputpath, 'Spatial.02.input')
spatial02 = Input(inputfile02).spatialdist
result = {'type':'uniform',
'longitude':(0*u.rad, 3.14*u.rad),
'latitude':(0*u.rad, 0.79*u.rad),
'exobase':2.1}
assert spatial02.__dict__ == pytest.approx(result)
if __name__ == '__main__':
# test_geometry()
# test_SurfaceInteraction()
# test_Forces()
test_SpatialDist()
|
python
|
import argparse
import os
import numpy as np
import torch
from mmcv.runner import Sequential
from tensorflow.python.training import py_checkpoint_reader
from mmcls.models.backbones.efficientnet import EfficientNet
def tf2pth(v):
if v.ndim == 4:
return np.ascontiguousarray(v.transpose(3, 2, 0, 1))
elif v.ndim == 2:
return np.ascontiguousarray(v.transpose())
return v
def read_ckpt(ckpt):
reader = py_checkpoint_reader.NewCheckpointReader(ckpt)
weights = {
n: torch.as_tensor(tf2pth(reader.get_tensor(n)))
for (n, _) in reader.get_variable_to_shape_map().items()
}
return weights
def map_key(weight):
m = dict()
has_expand_conv = set()
is_MBConv = set()
max_idx = 0
name = None
for k, v in weight.items():
seg = k.split('/')
if len(seg) == 1:
continue
if 'edgetpu' in seg[0]:
name = 'e' + seg[0][21:].lower()
else:
name = seg[0][13:]
if seg[2] == 'tpu_batch_normalization_2':
has_expand_conv.add(seg[1])
if seg[1].startswith('blocks_'):
idx = int(seg[1][7:]) + 1
max_idx = max(max_idx, idx)
if 'depthwise' in k:
is_MBConv.add(seg[1])
model = EfficientNet(name)
idx2key = []
for idx, module in enumerate(model.layers):
if isinstance(module, Sequential):
for j in range(len(module)):
idx2key.append('{}.{}'.format(idx, j))
else:
idx2key.append('{}'.format(idx))
for k, v in weight.items():
if 'Exponential' in k or 'RMS' in k:
continue
seg = k.split('/')
if len(seg) == 1:
continue
if seg[2] == 'depthwise_conv2d':
v = v.transpose(1, 0)
if seg[1] == 'stem':
prefix = 'backbone.layers.{}'.format(idx2key[0])
mapping = {
'conv2d/kernel': 'conv.weight',
'tpu_batch_normalization/beta': 'bn.bias',
'tpu_batch_normalization/gamma': 'bn.weight',
'tpu_batch_normalization/moving_mean': 'bn.running_mean',
'tpu_batch_normalization/moving_variance': 'bn.running_var',
}
suffix = mapping['/'.join(seg[2:])]
m[prefix + '.' + suffix] = v
elif seg[1].startswith('blocks_'):
idx = int(seg[1][7:]) + 1
prefix = '.'.join(['backbone', 'layers', idx2key[idx]])
if seg[1] not in is_MBConv:
mapping = {
'conv2d/kernel':
'conv1.conv.weight',
'tpu_batch_normalization/gamma':
'conv1.bn.weight',
'tpu_batch_normalization/beta':
'conv1.bn.bias',
'tpu_batch_normalization/moving_mean':
'conv1.bn.running_mean',
'tpu_batch_normalization/moving_variance':
'conv1.bn.running_var',
'conv2d_1/kernel':
'conv2.conv.weight',
'tpu_batch_normalization_1/gamma':
'conv2.bn.weight',
'tpu_batch_normalization_1/beta':
'conv2.bn.bias',
'tpu_batch_normalization_1/moving_mean':
'conv2.bn.running_mean',
'tpu_batch_normalization_1/moving_variance':
'conv2.bn.running_var',
}
else:
base_mapping = {
'depthwise_conv2d/depthwise_kernel':
'depthwise_conv.conv.weight',
'se/conv2d/kernel': 'se.conv1.conv.weight',
'se/conv2d/bias': 'se.conv1.conv.bias',
'se/conv2d_1/kernel': 'se.conv2.conv.weight',
'se/conv2d_1/bias': 'se.conv2.conv.bias'
}
if seg[1] not in has_expand_conv:
mapping = {
'conv2d/kernel':
'linear_conv.conv.weight',
'tpu_batch_normalization/beta':
'depthwise_conv.bn.bias',
'tpu_batch_normalization/gamma':
'depthwise_conv.bn.weight',
'tpu_batch_normalization/moving_mean':
'depthwise_conv.bn.running_mean',
'tpu_batch_normalization/moving_variance':
'depthwise_conv.bn.running_var',
'tpu_batch_normalization_1/beta':
'linear_conv.bn.bias',
'tpu_batch_normalization_1/gamma':
'linear_conv.bn.weight',
'tpu_batch_normalization_1/moving_mean':
'linear_conv.bn.running_mean',
'tpu_batch_normalization_1/moving_variance':
'linear_conv.bn.running_var',
}
else:
mapping = {
'depthwise_conv2d/depthwise_kernel':
'depthwise_conv.conv.weight',
'conv2d/kernel':
'expand_conv.conv.weight',
'conv2d_1/kernel':
'linear_conv.conv.weight',
'tpu_batch_normalization/beta':
'expand_conv.bn.bias',
'tpu_batch_normalization/gamma':
'expand_conv.bn.weight',
'tpu_batch_normalization/moving_mean':
'expand_conv.bn.running_mean',
'tpu_batch_normalization/moving_variance':
'expand_conv.bn.running_var',
'tpu_batch_normalization_1/beta':
'depthwise_conv.bn.bias',
'tpu_batch_normalization_1/gamma':
'depthwise_conv.bn.weight',
'tpu_batch_normalization_1/moving_mean':
'depthwise_conv.bn.running_mean',
'tpu_batch_normalization_1/moving_variance':
'depthwise_conv.bn.running_var',
'tpu_batch_normalization_2/beta':
'linear_conv.bn.bias',
'tpu_batch_normalization_2/gamma':
'linear_conv.bn.weight',
'tpu_batch_normalization_2/moving_mean':
'linear_conv.bn.running_mean',
'tpu_batch_normalization_2/moving_variance':
'linear_conv.bn.running_var',
}
mapping.update(base_mapping)
suffix = mapping['/'.join(seg[2:])]
m[prefix + '.' + suffix] = v
elif seg[1] == 'head':
seq_key = idx2key[max_idx + 1]
mapping = {
'conv2d/kernel':
'backbone.layers.{}.conv.weight'.format(seq_key),
'tpu_batch_normalization/beta':
'backbone.layers.{}.bn.bias'.format(seq_key),
'tpu_batch_normalization/gamma':
'backbone.layers.{}.bn.weight'.format(seq_key),
'tpu_batch_normalization/moving_mean':
'backbone.layers.{}.bn.running_mean'.format(seq_key),
'tpu_batch_normalization/moving_variance':
'backbone.layers.{}.bn.running_var'.format(seq_key),
'dense/kernel':
'head.fc.weight',
'dense/bias':
'head.fc.bias'
}
key = mapping['/'.join(seg[2:])]
if name.startswith('e') and 'fc' in key:
v = v[1:]
m[key] = v
return m
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=str, help='Path to the ckpt.')
parser.add_argument('outfile', type=str, help='Output file.')
args = parser.parse_args()
assert args.outfile
outdir = os.path.dirname(os.path.abspath(args.outfile))
if not os.path.exists(outdir):
os.makedirs(outdir)
weights = read_ckpt(args.infile)
weights = map_key(weights)
torch.save(weights, args.outfile)
|
python
|
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import sys
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebPage
import bs4 as bs
import urllib.request
import os
import datetime
path_of_brandwise = 'C:\\LavaWebScraper\\BrandWiseFiles\\'
############## VARIABLES NEEDED : DO NOT CHANGE THE VARIABLE NAMES. JUST FILL IN THEIR VALUES WHEREVER REQUIRED. ####################
base_url = 'http://www.karbonnmobiles.com/smart-phones'
country = 'INDIA'
company = 'KARBONN'
model_list = []
usp = []
display_list = []
memory_list = []
processor_list = []
camera_list = []
battery_list = []
thickness_list = []
extras_links = []
records = []
href = []
specs_list = []
####################################################################################################################################
r=requests.get(base_url)
soup=BeautifulSoup(r.text,'html.parser')
results=soup.find_all('h3',attrs={'class':'product_name'})
for i in range(len(results)):
href.append('http://www.karbonnmobiles.com' + results[i].find('a')['href'])
model_list.append(results[i].find('a').text)
specs=soup.find_all('div', attrs={'class':'product_info'})
for i in range(len(specs)):
spec=specs[i].find('p')
sp_list = []
sp = spec.text
spp = sp.split('\n')
for s in spp:
sp_list.append(s.replace('<br>',' ').replace('\n', ' ').replace('\r',' ').strip().strip('\n'))
specs_list.append(sp_list)
for i in range(len(specs_list)):
cc = ''
for j in range(len(specs_list[i])):
if 'Processor' in specs_list[i][j]:
processor_list.append(specs_list[i][j])
if 'Display' in specs_list[i][j]:
display_list.append(specs_list[i][j])
if 'Battery' in specs_list[i][j] or 'battery' in specs_list[i][j]:
battery_list.append(specs_list[i][j])
if 'camera' in specs_list[i][j] or 'Camera' in specs_list[i][j]:
cc = cc + specs_list[i][j] + ' || '
if cc!='':
camera_list.append(cc)
if len(processor_list)==i:
processor_list.append('Not Available')
if len(display_list)==i:
display_list.append('Not Available')
if len(battery_list)==i:
battery_list.append('Not Available')
if len(camera_list)==i:
camera_list.append('Not Available')
thickness_list.append('Not Available')
memory_list.append('Not Available')
print(len(processor_list))
print(len(display_list))
print(len(battery_list))
print(len(camera_list))
print(len(thickness_list))
print(len(memory_list))
extras_links = href
for i in range(len(href)):
usp.append('Not Available')
############# WRITING TO CSV : DO NOT MAKE ANY CHANGES TO THIS PART EXCEPT WRITING THE FILE NAME. ###################################
for i in range(len(model_list)):
records.append((country, company, model_list[i], usp[i], display_list[i], camera_list[i], memory_list[i], battery_list[i], thickness_list[i], processor_list[i], extras_links[i]))
df = pd.DataFrame(records, columns = ['COUNTRY', 'COMPANY', 'MODEL', 'USP', 'DISPLAY', 'CAMERA', 'MEMORY', 'BATTERY', 'THICKNESS', 'PROCESSOR', 'EXTRAS/ LINKS'])
df.to_csv(os.path.join(path_of_brandwise, str(datetime.date.today())+ '-karbonn' +'.csv'), index=False, encoding='utf-8')
#####################################################################################################################################
|
python
|
#!/usr/bin/env python3
import argparse
import sys
import os
import stat
import httplib2
from oauth2client.service_account import ServiceAccountCredentials
import apiclient
SCOPE = 'https://www.googleapis.com/auth/androidpublisher'
SCRIPT_VERSION = '2021-08-22'
def print_info():
print('publisher.py: %s' % SCRIPT_VERSION)
print('')
def build_service(service_account_email, key_file):
print('WARNING: build_service will be removed. use build_service_from_p12')
return build_service_from_p12(service_account_email, key_file)
def build_service_from_p12(service_account_email, key_file):
print('setup credentials and building service')
# build service acount using p12 file, based on
# https://stackoverflow.com/a/35666374/1016377
credentials = ServiceAccountCredentials.from_p12_keyfile(
service_account_email, key_file, scopes=[SCOPE])
http = httplib2.Http()
http = credentials.authorize(http)
return apiclient.discovery.build('androidpublisher', 'v3', http=http)
def build_service_from_json_file(json_key_file):
print('setup credentials and building service from %s' % json_key_file)
credentials = ServiceAccountCredentials.from_json_keyfile_name(json_key_file, scopes=[SCOPE])
http = httplib2.Http()
http = credentials.authorize(http)
return apiclient.discovery.build('androidpublisher', 'v3', http=http)
def create_edit(service, package_name):
request = service.edits().insert(body={}, packageName=package_name)
result = request.execute()
edit_id = result['id']
print('setup edit: %s' % (edit_id))
return edit_id
def update_track(service, package_name, edit_id, track, version):
print('update track %s' % (track))
request = service.edits().tracks().update(
editId=edit_id,
track=track,
packageName=package_name,
body={
u'track': track,
u'releases': [{
u'name': version['name'],
u'versionCodes': [version['code']],
u'releaseNotes': [{
u'language': u'en-GB',
u'text': version['notes']
}],
u'status': u'completed',
}]
})
response = request.execute()
print('setup with: %s' % (str(response['releases'])))
def validate_and_commit_edit(service, package_name, edit_id):
response = service.edits().validate(editId=edit_id, packageName=package_name).execute()
print('validated %s' % (response))
response = service.edits().commit(editId=edit_id, packageName=package_name).execute()
print('committed %s' % (response))
def upload_bundle(service, package_name, edit_id, aab_file):
print('uploading %s' % (aab_file))
request = service.edits().bundles().upload(
editId=edit_id,
packageName=package_name,
media_body=aab_file,
media_mime_type='application/octet-stream',
)
response = request.execute()
print('uploaded, %s' % (response))
# running as cli
def __run_from_cli_args(flags):
if flags.authentication_type == 'p12':
service = build_service_from_p12(flags.p12_service_account_email, flags.p12_key_path)
elif flags.authentication_type == 'json':
service = build_service_from_json_file(flags.json_key_file)
else:
raise ValueError('Unknown authentication type %s' % flags.authentication_type)
edit_id = create_edit(service, flags.package_name)
if flags.upload_aab:
upload_bundle(service, flags.package_name, edit_id, flags.upload_aab)
update_track(service, flags.package_name, edit_id, flags.track, version={
'name': flags.play_console_release_name,
'code': flags.version_code,
'notes': flags.release_notes,
})
validate_and_commit_edit(service, flags.package_name, edit_id)
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=True)
# authentication and app details
auth_params = parser.add_mutually_exclusive_group(required=True)
auth_params.add_argument('--p12',
nargs=2,
metavar=('[email protected]', 'p12keyfile'),
help='Use a service account email and p12 key file for authentication')
auth_params.add_argument('--json',
nargs=1,
metavar=('json-key-file'),
help='Use a json key file for authentication')
parser.add_argument('package_name',
metavar='package-name',
help='Android package name (applicationId, reverse domain name)')
# release details
release = parser.add_argument_group('release')
release.add_argument('version_code',
metavar='version-code',
type=int,
help='Android Version Code (int)')
release.add_argument('--track',
default='internal',
help='The Play Store track that should be updated (default: "internal")')
release.add_argument('--play-console-release-name',
metavar='release-name',
help='The name of the release in the Play store console ' +
'(default: same as the version code)')
release.add_argument('--release-notes-file',
metavar='file',
type=argparse.FileType('r'),
default=sys.stdin,
help='Read release notes from file. (default: read from stdin)')
# upload bundle
release.add_argument('--upload-aab',
metavar='aab-file',
help='The path to a bundle (*.aab) file to upload ' +
'as part of the release')
print_info()
args = parser.parse_args()
if not args.play_console_release_name:
args.play_console_release_name = str(args.version_code)
# authentication type
if args.p12:
args.p12_service_account_email, args.p12_key_path = args.p12
args.p12 = None
args.authentication_type = 'p12'
if not os.path.isfile(args.p12_key_path):
raise Exception('p12 key file not found: %s' % args.p12_key_path)
elif args.json:
args.json_key_file = args.json[0]
args.json = None
args.authentication_type = 'json'
if not os.path.isfile(args.json_key_file):
raise Exception('json key file not found: %s' % args.json_key_file)
if args.release_notes_file == sys.stdin:
mode = os.fstat(sys.stdin.fileno()).st_mode
if stat.S_ISFIFO(mode) or stat.S_ISREG(mode):
pass # piped or redirected
else:
print("Enter release notes:")
args.release_notes = args.release_notes_file.read()
args.release_notes_file.close()
if args.upload_aab:
# using argparse.FileType causes issues on CI, so check that the file exists here
if not os.path.isfile(args.upload_aab):
raise Exception('File not found for --upload-aab: %s' % args.upload_aab)
__run_from_cli_args(args)
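# A minimal programmatic sketch of the same release workflow the CLI above
# drives; the key file, package name and bundle path are hypothetical
# placeholders, not values from the original script:
#
#   service = build_service_from_json_file('service-account.json')
#   edit_id = create_edit(service, 'com.example.app')
#   upload_bundle(service, 'com.example.app', edit_id, 'app-release.aab')
#   update_track(service, 'com.example.app', edit_id, 'internal',
#                version={'name': '42', 'code': 42, 'notes': 'Bug fixes'})
#   validate_and_commit_edit(service, 'com.example.app', edit_id)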
|
python
|
"""Base register module"""
def pad_zeroes(addr, n_zeroes):
"""Padds the address with zeroes"""
if len(addr) < n_zeroes:
return pad_zeroes("0" + addr, n_zeroes)
return addr
def int_addr(addr):
"""Gets the integer representation of an address"""
return int(addr[1:])
def next_addr(addr, i):
"""Gets address after the current + i"""
str_addr = pad_zeroes(str(int_addr(addr) + i), len(addr[1:]))
return addr[0] + str_addr
class Register(object):
"""Represents the register of a simulator"""
def __init__(self, is_16bit, initial_address):
self._is_16bit = is_16bit
self._register_size = 8
if self._is_16bit:
self._register_size = 16
self._current_bit_address = ""
self._current_address = initial_address
self._size_of_current_register_address = 0
self._register_map = {}
self.move_to_next_bit_address()
self.move_to_next_address(1)
self.mark_address(initial_address, self._size_of_current_register_address)
@property
def current_address(self):
"""Gets the current constant address of the register."""
return self._current_address
@property
def is_16bit(self):
"""Gets whether the device is 16-bit
(or else 8-bit)"""
return self._is_16bit
@property
def current_bit_address(self):
"""Gets the current bit address"""
return self._current_bit_address
def get_register_letter(self):
"""Gets the letter representing the register (R, K, or S)
"""
return self._current_address[0]
def mark_address(self, addr, size):
"""Marks address as being used in simulator"""
i = 0
while i < size:
self._register_map[addr] = True
i += 1
def is_address_in_use(self, addr):
"""Returns value which determines if register address in use"""
return self._register_map.get(addr)
def next_address_avoid_collision(self, start_addr):
"""Finds the next address recursively which does not collide with any other address"""
i = self._size_of_current_register_address
while self.is_address_in_use(next_addr(start_addr, i)):
i += 1
return next_addr(start_addr, i)
def next_address(self):
"""Returns the next address after the current"""
return self.next_address_avoid_collision(self._current_address)
def move_to_next_address(self, size_of_current):
"""Moves the register's current address to the next available.
size_of_current specifies how many bytes/words to skip"""
self._size_of_current_register_address = size_of_current
self._current_address = self.next_address()
self.mark_address(self._current_address, size_of_current)
def move_to_next_bit_address(self):
"""Moves to next available bit address position"""
self._current_bit_address = self.next_bit_address()
self.mark_address(self._current_bit_address.split('.')[0], self._size_of_current_register_address)
def get_array(self, array_size):
"""Gets an array address"""
return "{0}[{1}]".format(self._current_address, array_size)
def next_bit_address(self):
"""Gets the next boolean address"""
if self._current_bit_address == "":
if self._is_16bit:
return "{0}.{1}".format(
self.next_address(),
"00")
return "{0}.{1}".format(
self.next_address(),
"0")
if self._is_16bit:
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 4:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
pad_zeroes(str(bool_half + 1), 2))
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"00")
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 3:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
bool_half + 1)
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"0")
|
python
|
from . import views
from django.urls import path
urlpatterns = [
path('', views.BlogHome.as_view(), name='blog'),
]
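# A possible shape for the view this urlconf references; the Post model and
# template name are assumptions for illustration, not taken from the project:
#
#   from django.views.generic import ListView
#   from .models import Post
#
#   class BlogHome(ListView):
#       model = Post
#       template_name = 'blog/home.html'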
|
python
|
# Time: O(n)
# Space: O(1)
# Two pointers solution
class Solution(object):
def numSubarraysWithSum(self, A, S):
"""
:type A: List[int]
:type S: int
:rtype: int
"""
result = 0
left, right, sum_left, sum_right = 0, 0, 0, 0
for i, a in enumerate(A):
sum_left += a
while left < i and sum_left > S:
sum_left -= A[left]
left += 1
sum_right += a
while right < i and \
(sum_right > S or (sum_right == S and not A[right])):
sum_right -= A[right]
right += 1
if sum_left == S:
result += right-left+1
return result
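# A minimal check of the two-pointer approach on an assumed example input
# (not part of the original file): A = [1, 0, 1, 0, 1] contains four
# subarrays that sum to S = 2.
if __name__ == '__main__':
    print(Solution().numSubarraysWithSum([1, 0, 1, 0, 1], 2))  # expected: 4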
|
python
|
from pytest import raises
import pandas as pd
from agape.mapping import dictify, gene2symbol
class TestDictify(object):
def setup_method(self):
self.df = pd.DataFrame({"A": [0, 1],
"B": ["x", "y"]})
self.key = "A"
self.value = "B"
def test_returns_dict(self):
assert isinstance(dictify(self.df, self.key, self.value), dict)
def test_df_equals_dict(self):
d = dictify(self.df, self.key, self.value)
assert all(self.df[self.key].values == list(d.keys()))
assert all(self.df[self.value].values == list(d.values()))
def test_raises_keyerror(self):
with raises(KeyError):
dictify(self.df, self.key, "C")
class TestGene2Symbol(object):
def test_returns_dict(self):
d = gene2symbol("ID", "Symbol")
assert isinstance(d, dict)
def test_raises_keyerror(self):
with raises(KeyError):
gene2symbol("ID", "NOTAKEY")
|
python
|
from . import api
from . import core
from .core import Stream, NameSpace, DataFeed
|
python
|
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from [email protected]. #
##############################################################################
import asyncio
import unittest
import sys
import numpy as np
sys.path.append('../../../hsds/util')
sys.path.append('../../../hsds')
# sys.path.append('../../chunkread')
from idUtil import getRootObjId
from storUtil import releaseStorageClient
from chunkread import get_app, read_hyperslab
import config
class ReadHyperslabTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ReadHyperslabTest, self).__init__(*args, **kwargs)
# main
async def read_hyperslab_test(self, app, params):
arr = await read_hyperslab(app, params)
self.assertEqual(arr.shape, (10,10))
self.assertEqual(arr.dtype, np.dtype('>i4'))
self.assertEqual(list(arr[1,:]), list(range(10)))
self.assertEqual(list(arr[:,1]), list(range(10)))
params["slices"]=((slice(1,2,1),slice(0,4,1)))
arr = await read_hyperslab(app, params)
self.assertEqual(arr.shape, (1,4))
self.assertEqual(arr.dtype, np.dtype('>i4'))
self.assertEqual(list(arr[0,:]), list(range(4)))
await releaseStorageClient(app)
def testReadHyperslab(self):
dset_id = config.get("dset111_id")
print("dset_id:", dset_id)
# these are the properties of the /g1/g1.1/dset1.1.1 dataset in tall.h5
dset_json = {"id": dset_id}
dset_json["root"] = getRootObjId(dset_id)
dset_json["type"] = {"class": "H5T_INTEGER", "base": "H5T_STD_I32BE"}
dset_json["shape"] = {"class": "H5S_SIMPLE", "dims": [10, 10], "maxdims": [10, 10]}
dset_json["layout"] = {"class": "H5D_CHUNKED", "dims": [10, 10]}
chunk_id = 'c' + dset_id[1:] + "_0_0"
params = {}
params["dset_json"] = dset_json
params["chunk_id"] = chunk_id
params["bucket"] = config.get("bucket")
loop = asyncio.get_event_loop()
app = get_app(loop=loop)
loop.run_until_complete(self.read_hyperslab_test(app, params))
loop.close()
if __name__ == '__main__':
#setup test files
unittest.main()
|
python
|
"""Test Refiners can be constructed with various configurations"""
import os
from copy import deepcopy
import pytest
from dxtbx.model.experiment_list import ExperimentListFactory
from libtbx import phil
from dials.algorithms.refinement import DialsRefineConfigError, RefinerFactory
from dials.algorithms.refinement.refiner import _trim_scans_to_observations, phil_scope
from dials.array_family import flex
from dials.util.slice import slice_reflections
@pytest.mark.parametrize(
"detector_parameterisation_choice",
["automatic", "single", "multiple", "hierarchical"],
)
def test_multi_panel_parameterisations(
dials_regression, detector_parameterisation_choice
):
data_dir = os.path.join(
dials_regression, "refinement_test_data", "cspad_refinement"
)
exp_file = os.path.join(data_dir, "cspad_refined_experiments_step6_level2_300.json")
ref_file = os.path.join(data_dir, "cspad_reflections_step7_300.pickle")
reflections = flex.reflection_table.from_file(ref_file)
experiments = ExperimentListFactory.from_json_file(exp_file, check_format=False)
# Set refinement parameters
params = phil_scope.fetch(source=phil.parse("")).extract()
params.refinement.parameterisation.detector.panels = (
detector_parameterisation_choice
)
# Construct refiner
if detector_parameterisation_choice == "single":
with pytest.raises(DialsRefineConfigError):
# Cannot create a single panel parameterisation for a multi-panel detector
RefinerFactory.from_parameters_data_experiments(
params, reflections, experiments
)
else:
refiner = RefinerFactory.from_parameters_data_experiments(
params, reflections, experiments
)
assert refiner.experiment_type == "stills"
def test_trim_scans_to_observations(dials_data):
# Use 4 scan data for this test
data_dir = dials_data("l_cysteine_dials_output")
experiments = ExperimentListFactory.from_json_file(
(data_dir / "indexed.expt").strpath, check_format=False
)
reflections = flex.reflection_table.from_file((data_dir / "indexed.refl").strpath)
# Check the image and oscillation range are what we expect
image_ranges = [e.sequence.get_image_range() for e in experiments]
osc_ranges = [e.sequence.get_oscillation_range() for e in experiments]
for a, b in zip(image_ranges, [(1, 1700), (1, 1700), (1, 1700), (1, 1800)]):
assert a == b
for a, b in zip(
osc_ranges, [(-145.0, 25.0), (-145.0, 25.0), (-145.0, 25.0), (0.0, 180.0)]
):
assert a == pytest.approx(b)
# If image range unchanged, nothing should happen
trim_expt = _trim_scans_to_observations(deepcopy(experiments), reflections)
new_im_ranges = [e.sequence.get_image_range() for e in trim_expt]
for a, b in zip(image_ranges, new_im_ranges):
assert a == b
# Slice 20 images off head and tail
sliced_ranges = [(r[0] + 20, r[1] - 20) for r in image_ranges]
sliced = slice_reflections(reflections, sliced_ranges)
# Now trimmed scans should have array ranges equal to their min, max
# shoebox z coords
trim_expt = _trim_scans_to_observations(deepcopy(experiments), sliced)
new_array_ranges = [e.sequence.get_array_range() for e in trim_expt]
for i, e in enumerate(trim_expt):
refs = sliced.select(sliced["id"] == i)
bb = refs["shoebox"].bounding_boxes()
z_min, z_max = bb.parts()[4:]
assert new_array_ranges[i] == (min(z_min), max(z_max))
# Oscillation ranges should be trimmed so that the associated angle is the
# same in the original and trimmed scans
new_osc_ranges = [e.sequence.get_oscillation_range() for e in trim_expt]
for exp, r1, r2 in zip(experiments, new_array_ranges, new_osc_ranges):
assert exp.sequence.get_angle_from_array_index(r1[0]) == pytest.approx(r2[0])
assert exp.sequence.get_angle_from_array_index(r1[1]) == pytest.approx(r2[1])
# Now delete shoebox data. Trimmed scans will be wider than the observed
# range by >0.5 deg at each end
del sliced["shoebox"]
trim_expt = _trim_scans_to_observations(deepcopy(experiments), sliced)
new_array_ranges = [e.sequence.get_array_range() for e in trim_expt]
for i, e in enumerate(trim_expt):
refs = sliced.select(sliced["id"] == i)
z = refs["xyzobs.px.value"].parts()[2]
im_width = e.sequence.get_oscillation()[1]
assert ((min(z) - new_array_ranges[i][0]) / im_width) > 0.5
assert ((new_array_ranges[i][1] - max(z)) / im_width) > 0.5
# Oscillation ranges should be trimmed so that the associated angle is the
# same in the original and trimmed scans
new_osc_ranges = [e.sequence.get_oscillation_range() for e in trim_expt]
for exp, r1, r2 in zip(experiments, new_array_ranges, new_osc_ranges):
assert exp.sequence.get_angle_from_array_index(r1[0]) == pytest.approx(r2[0])
assert exp.sequence.get_angle_from_array_index(r1[1]) == pytest.approx(r2[1])
|
python
|