file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
DissertationScript.py | [0][1]) #name[1] is political affiliation because the txt file was organized in such a way
except Exception as e:
print("Problem with name in file: "+file)
try:
if file[0]=="H":
a[name[0][0]].append(year[0] + " - House")
if file[0] == "S":
a[name[0][0]].append(year[0] + " - Senate")
except Exception as e:
print("Problem with year: "+file)
try:
a[name[0][0]].extend(edu)
except Exception as e:
print("Problem with education: " + file)
print(a)
#Suggestion to use json for defaultdict instead of csv (http://codereview.stackexchange.com/questions/30741/writing-defaultdict-to-csv-file)
json.dump(a,open('GPO Biographies - JSON','w'))
#pd.DataFrame.from_dict(a, orient='index').to_csv("GPO Biographies - Education1.csv")
#Takes a list of names, searches them on C-SPAN, and extracts the website associated with them - website includes PersonID
def cspan_selenium_getsite():
###Create files with each Senator and their full website, including PersonID
#Load GPO Biographies - JSON and extract the key, which is names of all Congresspeople going back to 1997
names = json.load(open('GPO Biographies - JSON'))
names = list(names.keys())
#This gets rid of middle names and middle initials, so the search is better on C-SPAN
names = [name.split(" ")[0].title() + " " + name.split(" ")[-1].title() for name in names]
# Log in with Selenium
driver = webdriver.Firefox()
driver.get("http://www.c-span.org")
login = driver.find_element_by_class_name("my-cspan")
login.click()
time.sleep(1)
user = driver.find_elements_by_id("login")[1]
user.clear()
user.send_keys(username)
pw = driver.find_element_by_id("password")
pw.clear()
pw.send_keys(password) #bug fix: was pw.send_keys(pw), which sends the WebElement itself; assumes a `password` variable defined alongside `username`
clicklogin = driver.find_element_by_id("submit-login")
clicklogin.click()
errorlog = []
for name in names:
try:
#Have to wait a bit of time because the website gets screwy and can't find the dropdown menu sometimes
time.sleep(10)
openfilter = driver.find_element_by_class_name('selected')
# openfilter = driver.find_element_by_xpath("//form[@class='search']/fieldset/div/span[@class='carat icon-chevron-down']")
openfilter.click()
peoplefilter = driver.find_element_by_xpath('//div[@style]/ul/li[4]')
time.sleep(0.5)
peoplefilter.click()
namesearch = driver.find_element_by_id('global-search')
namesearch.clear()
namesearch.send_keys(name)
clicker = driver.find_element_by_class_name('icon-search')
clicker.click()
time.sleep(1.5)
search = driver.find_elements_by_class_name('thumb')[0]
search.click()
source = driver.page_source
ID = re.compile('personid\[\]=(.*?)"').findall(source)
print(name,names.index(name),ID)
with open("C-SPAN PersonID1.txt","a") as f:
f.write(name+"\t"+ID[0]+"\n")
if len(ID) > 4:
errorlog.append(name)
except Exception as e:
print("COME BACKKKK AND GET THIS ONE MANUALLY!!!: ", name)
errorlog.append(name)
print(errorlog)
#This takes a tab-delimited file with name and C-SPAN website, and simplifies it to just name and C-SPAN ID (using output from cspan_selenium_getsite())
def cspan_selenium_getid():
###Create a file with just Senator's name and personID in separate column
with open("C-SPAN PersonID.txt") as f:
f = f.read().splitlines()
print(f)
for item in f:
ID = item.split("=")[-1]
name = item.split("\t")[0]
with open("C-SPAN PersonID (simplified).txt","a") as g:
g.write(name+"\t"+ID+"\n")
#Makes file names the correct case (upper/lower) for matching in the match_name_CongressRecord() function below. Also turns the
#tab-delimited text file into a list
def file_to_list_upper(file):
with open(file,"r",encoding="ISO-8859-1") as f:
f = f.readlines()
return([x.split("\t")[0].title() for x in f])
#C-SPAN PersonID file has names and C-SPAN ID. This function takes the person's name and returns their C-SPAN ID
def dict_cspan_id(name):
with open("C-SPAN PersonID (simplified).txt","r",encoding="Latin1") as f:
df = pd.read_csv(f,sep="\t",header=None,index_col=0).squeeze("columns").to_dict() #pd.Series.from_csv was removed in newer pandas versions
return(df[name])
#Match name from Congressional Record to names of spoken word text files, and then get C-SPAN ID
def match_name_CongressRecord():
allcongrnames = json.load(open('GPO Biographies - JSON'))
allcongrnameslist = list(allcongrnames.keys())
#print(file_to_list_upper("C-SPAN PersonID (simplified).txt"))
#print(len(namelist))
allcongrnameslist = [name.split(" ")[0]+" "+name.split(" ")[-1] for name in allcongrnameslist]
#print(namelist)
#The /media/jemme directory is from my orange external hard drive
for root,dirs,files in os.walk("/media/jemme/New Volume/Congressional Hearings - People (new)"):
for file in files:
name = file.split("_")[0] # Need to match name with ID
date = file.split("_")[1][:-4] # Need in this format: 2015-06-10
try:
date = datetime.datetime.strptime(date, "%Y.%m.%d").strftime("%Y-%m-%d")
#print(name, date)
except Exception as e:
print(file + " has a weird file name, I think..." + str(e))
namematch = difflib.get_close_matches(name,allcongrnameslist,cutoff=.8)
# The outer call finds the ID based on the name, using the C-SPAN PersonID file. difflib.get_close_matches compares the name of the file in the folder
# to the list of names from the C-SPAN PersonID file and returns the closest name, which is fed into dict_cspan_id to look up the ID
if difflib.get_close_matches(name.title(), file_to_list_upper("C-SPAN PersonID (simplified).txt"),cutoff=.8):
print(difflib.get_close_matches(name.title(), file_to_list_upper("C-SPAN PersonID (simplified).txt"),cutoff=.8))
ID = dict_cspan_id(difflib.get_close_matches(name.title(), file_to_list_upper("C-SPAN PersonID (simplified).txt"),cutoff=.8)[0])
#ID = dict_cspan_id(difflib.get_close_matches(name.title(), file_to_list_upper("C-SPAN PersonID (simplified test).txt")))
print(name,ID)
#Several functions now add important info to the C-SPAN PersonID file - the below adds the dates each person spoke based on the transcripts
def add_dates():
with open("C-SPAN PersonID.txt",encoding="Latin1") as f:
f = f.read().splitlines()
#f = [item.split("\t")[0] for item in f]
#print(os.listdir("Congressional Hearings - People (new)"))
for item in f:
print(item)
#This first has to capitalize just the first letter of the transcript names, since they are all caps beforehand
#and, thus, can't match
transcriptnames = [name.title() for name in os.listdir("/media/jemme/New Volume/Congressional Hearings - People (new)")]
transcriptnamesmatch = difflib.get_close_matches(item,transcriptnames)
if transcriptnamesmatch:
print(transcriptnamesmatch)
#Turn the matched name back into all caps after it matches, so that it can find the actual transcript file
try:
dates = os.listdir("/media/jemme/New Volume/Congressional Hearings - People (new)/"+transcriptnamesmatch[0].upper())
except Exception as e:
print(item+" doesn't WORKKKKK!")
continue #skip this item so the loop below doesn't use a stale or undefined `dates`
for date in dates:
date = date.split("_")[1][:-4].replace(".","-")
with open("C-SPAN PersonID and File Dates.txt","a") as outfile:
outfile.write(item+"\t"+transcriptnamesmatch[0]+"\t"+date+"\n")
#This is just a helper function used in add_date_month below - it converts dates into the proper format
def set_date(date):
| date = datetime.datetime.strptime(date,"%Y-%m-%d")
date = datetime.datetime.strftime(date, "%Y-%m-%d")
return(date) | identifier_body |
|
makesnapshots.py | 6: Public release
# version 2.0: Added daily, weekly and monthly retention
# version 3.0: Rewrote deleting functions, changed description
# version 3.1: Fix a bug with the deletelist and added a pause in the volume loop
# version 3.2: Tags of the volume are placed on the new snapshot
# version 3.3: Merged IAM role addition from Github"""
import logging
import sys
import time
import traceback
from datetime import datetime
import boto3
import boto.ec2
import boto.sns
from boto.ec2.connection import EC2Connection
from boto.ec2.regioninfo import RegionInfo
from config import config
if len(sys.argv) < 2:
print('Please add a positional argument: day, week or month.')
quit()
else:
if sys.argv[1] == 'day':
period = 'day'
date_suffix = datetime.today().strftime('%a')
elif sys.argv[1] == 'week':
period = 'week'
date_suffix = datetime.today().strftime('%U')
elif sys.argv[1] == 'month':
period = 'month'
date_suffix = datetime.today().strftime('%b')
else:
print('Please use the parameter day, week or month')
quit()
# Message to return result via SNS
message = ""
errmsg = ""
# Counters
total_creates = 0
total_copies = 0
total_deletes = 0
count_errors = 0
# List with snapshots to delete
deletelist = []
# Setup logging
logging.basicConfig(filename=config['log_file'], level=logging.INFO)
start_message = 'Started taking %(period)s snapshots at %(date)s' % {
'period': period,
'date': datetime.today().strftime('%d-%m-%Y %H:%M:%S')
}
message += start_message + "\n\n"
logging.info(start_message)
# Assume IAM role if defined.
def refresh_aws_credentials():
global refresh_time
if config['iam_role_arn']:
if time.time() - refresh_time < 2700:
return
sts = boto3.client('sts')
assumedRoleObject = sts.assume_role(RoleArn=config['iam_role_arn'], RoleSessionName='AssumeRoleSession1')
credentials = assumedRoleObject['Credentials']
config['aws_access_key'] = credentials['AccessKeyId']
config['aws_secret_key'] = credentials['SecretAccessKey']
config['security_token'] = credentials['SessionToken']
refresh_time = time.time()
else:
config['security_token'] = None
refresh_time = 0
refresh_aws_credentials()
# Get settings from config.py
ec2_region_name = config['ec2_region_name']
ec2_region_endpoint = config['ec2_region_endpoint']
sns_arn = config.get('arn')
proxyHost = config.get('proxyHost')
proxyPort = config.get('proxyPort')
copy_region_name = config.get('copy_region_name')
region = RegionInfo(name=ec2_region_name, endpoint=ec2_region_endpoint)
# Number of snapshots to keep
keep_week = config['keep_week']
keep_day = config['keep_day']
keep_month = config['keep_month']
count_success = 0
count_total = 0
# Connect to AWS using the credentials provided above or in Environment vars or using IAM role.
print('Connecting to AWS')
if proxyHost:
# proxy:
# using roles
if config['aws_access_key']:
conn = EC2Connection(config['aws_access_key'], config['aws_secret_key'], security_token=config['security_token'], region=region, proxy=proxyHost, proxy_port=proxyPort)
else:
conn = EC2Connection(region=region, proxy=proxyHost, proxy_port=proxyPort)
else:
# non proxy:
# using roles
if config['aws_access_key']:
conn = EC2Connection(config['aws_access_key'], config['aws_secret_key'], security_token=config['security_token'], region=region)
else:
conn = EC2Connection(region=region)
# Connect to SNS
if sns_arn:
print('Connecting to SNS')
if proxyHost:
# proxy:
# using roles:
if config['aws_access_key']:
sns = boto.sns.connect_to_region(ec2_region_name, aws_access_key_id=config['aws_access_key'], aws_secret_access_key=config['aws_secret_key'],
security_token=config['security_token'], proxy=proxyHost, proxy_port=proxyPort)
else:
sns = boto.sns.connect_to_region(ec2_region_name, proxy=proxyHost, proxy_port=proxyPort)
else:
# non proxy:
# using roles
if config['aws_access_key']:
sns = boto.sns.connect_to_region(ec2_region_name, aws_access_key_id=config['aws_access_key'], aws_secret_access_key=config['aws_secret_key'])
else:
sns = boto.sns.connect_to_region(ec2_region_name)
def | (resource_id):
resource_tags = {}
if resource_id:
tags = conn.get_all_tags({'resource-id': resource_id})
for tag in tags:
# Tags starting with 'aws:' are reserved for internal use
if not tag.name.startswith('aws:'):
resource_tags[tag.name] = tag.value
return resource_tags
def set_resource_tags(resource, tags):
for tag_key, tag_value in tags.items():
if tag_key not in resource.tags or resource.tags[tag_key] != tag_value:
print('Tagging %(resource_id)s with [%(tag_key)s: %(tag_value)s]' % {
'resource_id': resource.id,
'tag_key': tag_key,
'tag_value': tag_value
})
resource.add_tag(tag_key, tag_value)
# Get all the volumes that match the tag criteria
print('Finding volumes that match the requested tag ({ "tag:%(tag_name)s": "%(tag_value)s" })' % config)
vols = conn.get_all_volumes(filters={'tag:' + config['tag_name']: config['tag_value']})
credentials_refresh_time = time.time()
for vol in vols:
refresh_aws_credentials()
try:
count_total += 1
logging.info(vol)
tags_volume = get_resource_tags(vol.id)
description = '%(period)s_snapshot %(vol_id)s_%(period)s_%(date_suffix)s by snapshot script at %(date)s' % {
'period': period,
'vol_id': vol.id,
'date_suffix': date_suffix,
'date': datetime.today().strftime('%d-%m-%Y %H:%M:%S')
}
try:
current_snap = vol.create_snapshot(description)
set_resource_tags(current_snap, tags_volume)
suc_message = 'Snapshot created with description: %s and tags: %s' % (description, str(tags_volume))
print(' ' + suc_message)
logging.info(suc_message)
total_creates += 1
except Exception as e:
print("Unexpected error:", sys.exc_info()[0])
traceback.print_exc()
logging.error(e)
# Copy snapshot to another region.
if copy_region_name:
try:
# We need to wait until the snapshot is complete before shipping them
while True:
refresh_aws_credentials()
if current_snap.status == 'completed':
break
time.sleep(10)
current_snap.update(validate=True)
print(' ' + current_snap.status)
dest_conn = boto3.client(
'ec2',
region_name=copy_region_name,
aws_access_key_id=config['aws_access_key'],
aws_secret_access_key=config['aws_secret_key'],
aws_session_token=config['security_token']
)
copied_snap = dest_conn.copy_snapshot(SourceRegion=ec2_region_name, SourceSnapshotId=current_snap.id,
Description='[Copied from %s] %s' % (ec2_region_name, description))
dest_conn.create_tags(Resources=[copied_snap['SnapshotId']], Tags=[
{'Key': 'source_snapshot_id', 'Value': current_snap.id},
{'Key': 'Name', 'Value': tags_volume.get('Name') or current_snap.id}
])
suc_message = 'Snapshot copied to ' + copy_region_name
print(' ' + suc_message)
logging.info(suc_message)
total_copies += 1
except Exception as e:
print("Unexpected error:", sys.exc_info()[0])
traceback.print_exc()
logging.error(e)
snapshots = vol.snapshots()
deletelist = []
for snap in snapshots:
sndesc = snap.description
if sndesc.startswith('week_snapshot') and period == 'week':
deletelist.append(snap)
elif sndesc.startswith('day_snapshot') and period == 'day':
deletelist.append(snap)
elif sndesc.startswith('month_snapshot') and period == 'month':
deletelist.append(snap)
else:
logging.info(' Skipping, not added to deletelist: ' + sndesc)
for snap in deletelist:
logging.info(snap)
logging.info(snap.start_time)
def date_compare(snapshot):
return snapshot.start_time
deletelist.sort(key=date_compare)
if period == 'day':
keep = keep_day
elif period == 'week':
keep = keep_week
elif period == 'month':
keep = keep_month
delta = len(deletelist) - keep
for i in range(delta):
del_message = ' Deleting snapshot ' + deletelist[i | get_resource_tags | identifier_name |
makesnapshots.py | 6: Public release
# version 2.0: Added daily, weekly and monthly retention
# version 3.0: Rewrote deleting functions, changed description
# version 3.1: Fix a bug with the deletelist and added a pause in the volume loop
# version 3.2: Tags of the volume are placed on the new snapshot
# version 3.3: Merged IAM role addition from Github"""
import logging
import sys
import time
import traceback
from datetime import datetime
import boto3
import boto.ec2
import boto.sns
from boto.ec2.connection import EC2Connection
from boto.ec2.regioninfo import RegionInfo
from config import config
if len(sys.argv) < 2:
print('Please add a positional argument: day, week or month.')
quit()
else:
if sys.argv[1] == 'day':
period = 'day'
date_suffix = datetime.today().strftime('%a')
elif sys.argv[1] == 'week':
period = 'week'
date_suffix = datetime.today().strftime('%U')
elif sys.argv[1] == 'month':
period = 'month'
date_suffix = datetime.today().strftime('%b')
else:
print('Please use the parameter day, week or month')
quit()
# Message to return result via SNS
message = ""
errmsg = ""
# Counters
total_creates = 0
total_copies = 0
total_deletes = 0
count_errors = 0
# List with snapshots to delete
deletelist = []
# Setup logging
logging.basicConfig(filename=config['log_file'], level=logging.INFO)
start_message = 'Started taking %(period)s snapshots at %(date)s' % {
'period': period,
'date': datetime.today().strftime('%d-%m-%Y %H:%M:%S')
}
message += start_message + "\n\n"
logging.info(start_message)
# Assume IAM role if defined.
def refresh_aws_credentials():
global refresh_time
if config['iam_role_arn']:
if time.time() - refresh_time < 2700:
return
sts = boto3.client('sts')
assumedRoleObject = sts.assume_role(RoleArn=config['iam_role_arn'], RoleSessionName='AssumeRoleSession1')
credentials = assumedRoleObject['Credentials']
config['aws_access_key'] = credentials['AccessKeyId']
config['aws_secret_key'] = credentials['SecretAccessKey']
config['security_token'] = credentials['SessionToken']
refresh_time = time.time()
else:
config['security_token'] = None
refresh_time = 0
refresh_aws_credentials()
# Get settings from config.py
ec2_region_name = config['ec2_region_name']
ec2_region_endpoint = config['ec2_region_endpoint']
sns_arn = config.get('arn')
proxyHost = config.get('proxyHost')
proxyPort = config.get('proxyPort')
copy_region_name = config.get('copy_region_name')
region = RegionInfo(name=ec2_region_name, endpoint=ec2_region_endpoint)
# Number of snapshots to keep
keep_week = config['keep_week']
keep_day = config['keep_day']
keep_month = config['keep_month']
count_success = 0
count_total = 0
# Connect to AWS using the credentials provided above or in Environment vars or using IAM role.
print('Connecting to AWS')
if proxyHost:
# proxy:
# using roles
if config['aws_access_key']:
conn = EC2Connection(config['aws_access_key'], config['aws_secret_key'], security_token=config['security_token'], region=region, proxy=proxyHost, proxy_port=proxyPort)
else:
conn = EC2Connection(region=region, proxy=proxyHost, proxy_port=proxyPort)
else:
# non proxy:
# using roles
if config['aws_access_key']:
conn = EC2Connection(config['aws_access_key'], config['aws_secret_key'], security_token=config['security_token'], region=region)
else:
conn = EC2Connection(region=region)
# Connect to SNS
if sns_arn:
print('Connecting to SNS')
if proxyHost:
# proxy:
# using roles:
if config['aws_access_key']:
sns = boto.sns.connect_to_region(ec2_region_name, aws_access_key_id=config['aws_access_key'], aws_secret_access_key=config['aws_secret_key'],
security_token=config['security_token'], proxy=proxyHost, proxy_port=proxyPort)
else:
sns = boto.sns.connect_to_region(ec2_region_name, proxy=proxyHost, proxy_port=proxyPort)
else:
# non proxy:
# using roles
if config['aws_access_key']:
sns = boto.sns.connect_to_region(ec2_region_name, aws_access_key_id=config['aws_access_key'], aws_secret_access_key=config['aws_secret_key'])
else:
sns = boto.sns.connect_to_region(ec2_region_name)
def get_resource_tags(resource_id):
|
def set_resource_tags(resource, tags):
for tag_key, tag_value in tags.items():
if tag_key not in resource.tags or resource.tags[tag_key] != tag_value:
print('Tagging %(resource_id)s with [%(tag_key)s: %(tag_value)s]' % {
'resource_id': resource.id,
'tag_key': tag_key,
'tag_value': tag_value
})
resource.add_tag(tag_key, tag_value)
# Get all the volumes that match the tag criteria
print('Finding volumes that match the requested tag ({ "tag:%(tag_name)s": "%(tag_value)s" })' % config)
vols = conn.get_all_volumes(filters={'tag:' + config['tag_name']: config['tag_value']})
credentials_refresh_time = time.time()
for vol in vols:
refresh_aws_credentials()
try:
count_total += 1
logging.info(vol)
tags_volume = get_resource_tags(vol.id)
description = '%(period)s_snapshot %(vol_id)s_%(period)s_%(date_suffix)s by snapshot script at %(date)s' % {
'period': period,
'vol_id': vol.id,
'date_suffix': date_suffix,
'date': datetime.today().strftime('%d-%m-%Y %H:%M:%S')
}
try:
current_snap = vol.create_snapshot(description)
set_resource_tags(current_snap, tags_volume)
suc_message = 'Snapshot created with description: %s and tags: %s' % (description, str(tags_volume))
print(' ' + suc_message)
logging.info(suc_message)
total_creates += 1
except Exception as e:
print("Unexpected error:", sys.exc_info()[0])
traceback.print_exc()
logging.error(e)
# Copy snapshot to another region.
if copy_region_name:
try:
# We need to wait until the snapshot is complete before shipping them
while True:
refresh_aws_credentials()
if current_snap.status == 'completed':
break
time.sleep(10)
current_snap.update(validate=True)
print(' ' + current_snap.status)
dest_conn = boto3.client(
'ec2',
region_name=copy_region_name,
aws_access_key_id=config['aws_access_key'],
aws_secret_access_key=config['aws_secret_key'],
aws_session_token=config['security_token']
)
copied_snap = dest_conn.copy_snapshot(SourceRegion=ec2_region_name, SourceSnapshotId=current_snap.id,
Description='[Copied from %s] %s' % (ec2_region_name, description))
dest_conn.create_tags(Resources=[copied_snap['SnapshotId']], Tags=[
{'Key': 'source_snapshot_id', 'Value': current_snap.id},
{'Key': 'Name', 'Value': tags_volume.get('Name') or current_snap.id}
])
suc_message = 'Snapshot copied to ' + copy_region_name
print(' ' + suc_message)
logging.info(suc_message)
total_copies += 1
except Exception as e:
print("Unexpected error:", sys.exc_info()[0])
traceback.print_exc()
logging.error(e)
snapshots = vol.snapshots()
deletelist = []
for snap in snapshots:
sndesc = snap.description
if sndesc.startswith('week_snapshot') and period == 'week':
deletelist.append(snap)
elif sndesc.startswith('day_snapshot') and period == 'day':
deletelist.append(snap)
elif sndesc.startswith('month_snapshot') and period == 'month':
deletelist.append(snap)
else:
logging.info(' Skipping, not added to deletelist: ' + sndesc)
for snap in deletelist:
logging.info(snap)
logging.info(snap.start_time)
def date_compare(snapshot):
return snapshot.start_time
deletelist.sort(key=date_compare)
if period == 'day':
keep = keep_day
elif period == 'week':
keep = keep_week
elif period == 'month':
keep = keep_month
delta = len(deletelist) - keep
for i in range(delta):
del_message = ' Deleting snapshot ' + deletelist[i]. | resource_tags = {}
if resource_id:
tags = conn.get_all_tags({'resource-id': resource_id})
for tag in tags:
# Tags starting with 'aws:' are reserved for internal use
if not tag.name.startswith('aws:'):
resource_tags[tag.name] = tag.value
return resource_tags | identifier_body |
makesnapshots.py | 6: Public release
# version 2.0: Added daily, weekly and monthly retention
# version 3.0: Rewrote deleting functions, changed description
# version 3.1: Fix a bug with the deletelist and added a pause in the volume loop
# version 3.2: Tags of the volume are placed on the new snapshot
# version 3.3: Merged IAM role addition from Github"""
import logging
import sys
import time
import traceback
from datetime import datetime
import boto3
import boto.ec2
import boto.sns
from boto.ec2.connection import EC2Connection
from boto.ec2.regioninfo import RegionInfo
from config import config
if len(sys.argv) < 2:
print('Please add a positional argument: day, week or month.')
quit()
else:
if sys.argv[1] == 'day':
period = 'day'
date_suffix = datetime.today().strftime('%a')
elif sys.argv[1] == 'week':
period = 'week'
date_suffix = datetime.today().strftime('%U')
elif sys.argv[1] == 'month':
period = 'month'
date_suffix = datetime.today().strftime('%b')
else:
print('Please use the parameter day, week or month')
quit()
# Message to return result via SNS
message = ""
errmsg = ""
# Counters
total_creates = 0
total_copies = 0
total_deletes = 0
count_errors = 0
# List with snapshots to delete
deletelist = []
# Setup logging
logging.basicConfig(filename=config['log_file'], level=logging.INFO)
start_message = 'Started taking %(period)s snapshots at %(date)s' % {
'period': period,
'date': datetime.today().strftime('%d-%m-%Y %H:%M:%S')
}
message += start_message + "\n\n"
logging.info(start_message)
# Assume IAM role if defined.
def refresh_aws_credentials():
global refresh_time
if config['iam_role_arn']:
if time.time() - refresh_time < 2700:
return
sts = boto3.client('sts')
assumedRoleObject = sts.assume_role(RoleArn=config['iam_role_arn'], RoleSessionName='AssumeRoleSession1')
credentials = assumedRoleObject['Credentials']
config['aws_access_key'] = credentials['AccessKeyId']
config['aws_secret_key'] = credentials['SecretAccessKey']
config['security_token'] = credentials['SessionToken']
refresh_time = time.time()
else:
config['security_token'] = None
refresh_time = 0
refresh_aws_credentials()
# Get settings from config.py
ec2_region_name = config['ec2_region_name']
ec2_region_endpoint = config['ec2_region_endpoint']
sns_arn = config.get('arn')
proxyHost = config.get('proxyHost')
proxyPort = config.get('proxyPort')
copy_region_name = config.get('copy_region_name')
region = RegionInfo(name=ec2_region_name, endpoint=ec2_region_endpoint)
# Number of snapshots to keep
keep_week = config['keep_week']
keep_day = config['keep_day']
keep_month = config['keep_month']
count_success = 0
count_total = 0
# Connect to AWS using the credentials provided above or in Environment vars or using IAM role.
print('Connecting to AWS')
if proxyHost:
# proxy:
# using roles
if config['aws_access_key']:
conn = EC2Connection(config['aws_access_key'], config['aws_secret_key'], security_token=config['security_token'], region=region, proxy=proxyHost, proxy_port=proxyPort)
else:
conn = EC2Connection(region=region, proxy=proxyHost, proxy_port=proxyPort)
else:
# non proxy:
# using roles
if config['aws_access_key']:
conn = EC2Connection(config['aws_access_key'], config['aws_secret_key'], security_token=config['security_token'], region=region)
else:
conn = EC2Connection(region=region)
# Connect to SNS
if sns_arn:
print('Connecting to SNS')
if proxyHost:
# proxy:
# using roles:
if config['aws_access_key']:
sns = boto.sns.connect_to_region(ec2_region_name, aws_access_key_id=config['aws_access_key'], aws_secret_access_key=config['aws_secret_key'],
security_token=config['security_token'], proxy=proxyHost, proxy_port=proxyPort)
else:
sns = boto.sns.connect_to_region(ec2_region_name, proxy=proxyHost, proxy_port=proxyPort)
else:
# non proxy:
# using roles
if config['aws_access_key']:
sns = boto.sns.connect_to_region(ec2_region_name, aws_access_key_id=config['aws_access_key'], aws_secret_access_key=config['aws_secret_key'])
else:
sns = boto.sns.connect_to_region(ec2_region_name)
def get_resource_tags(resource_id):
resource_tags = {}
if resource_id:
tags = conn.get_all_tags({'resource-id': resource_id})
for tag in tags:
# Tags starting with 'aws:' are reserved for internal use
if not tag.name.startswith('aws:'):
resource_tags[tag.name] = tag.value
return resource_tags
def set_resource_tags(resource, tags):
for tag_key, tag_value in tags.items():
if tag_key not in resource.tags or resource.tags[tag_key] != tag_value:
print('Tagging %(resource_id)s with [%(tag_key)s: %(tag_value)s]' % {
'resource_id': resource.id,
'tag_key': tag_key,
'tag_value': tag_value
})
resource.add_tag(tag_key, tag_value)
# Get all the volumes that match the tag criteria
print('Finding volumes that match the requested tag ({ "tag:%(tag_name)s": "%(tag_value)s" })' % config)
vols = conn.get_all_volumes(filters={'tag:' + config['tag_name']: config['tag_value']})
credentials_refresh_time = time.time() | try:
count_total += 1
logging.info(vol)
tags_volume = get_resource_tags(vol.id)
description = '%(period)s_snapshot %(vol_id)s_%(period)s_%(date_suffix)s by snapshot script at %(date)s' % {
'period': period,
'vol_id': vol.id,
'date_suffix': date_suffix,
'date': datetime.today().strftime('%d-%m-%Y %H:%M:%S')
}
try:
current_snap = vol.create_snapshot(description)
set_resource_tags(current_snap, tags_volume)
suc_message = 'Snapshot created with description: %s and tags: %s' % (description, str(tags_volume))
print(' ' + suc_message)
logging.info(suc_message)
total_creates += 1
except Exception as e:
print("Unexpected error:", sys.exc_info()[0])
traceback.print_exc()
logging.error(e)
# Copy snapshot to another region.
if copy_region_name:
try:
# We need to wait until the snapshot is complete before shipping them
while True:
refresh_aws_credentials()
if current_snap.status == 'completed':
break
time.sleep(10)
current_snap.update(validate=True)
print(' ' + current_snap.status)
dest_conn = boto3.client(
'ec2',
region_name=copy_region_name,
aws_access_key_id=config['aws_access_key'],
aws_secret_access_key=config['aws_secret_key'],
aws_session_token=config['security_token']
)
copied_snap = dest_conn.copy_snapshot(SourceRegion=ec2_region_name, SourceSnapshotId=current_snap.id,
Description='[Copied from %s] %s' % (ec2_region_name, description))
dest_conn.create_tags(Resources=[copied_snap['SnapshotId']], Tags=[
{'Key': 'source_snapshot_id', 'Value': current_snap.id},
{'Key': 'Name', 'Value': tags_volume.get('Name') or current_snap.id}
])
suc_message = 'Snapshot copied to ' + copy_region_name
print(' ' + suc_message)
logging.info(suc_message)
total_copies += 1
except Exception as e:
print("Unexpected error:", sys.exc_info()[0])
traceback.print_exc()
logging.error(e)
snapshots = vol.snapshots()
deletelist = []
for snap in snapshots:
sndesc = snap.description
if sndesc.startswith('week_snapshot') and period == 'week':
deletelist.append(snap)
elif sndesc.startswith('day_snapshot') and period == 'day':
deletelist.append(snap)
elif sndesc.startswith('month_snapshot') and period == 'month':
deletelist.append(snap)
else:
logging.info(' Skipping, not added to deletelist: ' + sndesc)
for snap in deletelist:
logging.info(snap)
logging.info(snap.start_time)
def date_compare(snapshot):
return snapshot.start_time
deletelist.sort(key=date_compare)
if period == 'day':
keep = keep_day
elif period == 'week':
keep = keep_week
elif period == 'month':
keep = keep_month
delta = len(deletelist) - keep
for i in range(delta):
del_message = ' Deleting snapshot ' + deletelist[i]. | for vol in vols:
refresh_aws_credentials() | random_line_split |
makesnapshots.py | 6: Public release
# version 2.0: Added daily, weekly and monthly retention
# version 3.0: Rewrote deleting functions, changed description
# version 3.1: Fix a bug with the deletelist and added a pause in the volume loop
# version 3.2: Tags of the volume are placed on the new snapshot
# version 3.3: Merged IAM role addition from Github"""
import logging
import sys
import time
import traceback
from datetime import datetime
import boto3
import boto.ec2
import boto.sns
from boto.ec2.connection import EC2Connection
from boto.ec2.regioninfo import RegionInfo
from config import config
if len(sys.argv) < 2:
print('Please add a positional argument: day, week or month.')
quit()
else:
if sys.argv[1] == 'day':
period = 'day'
date_suffix = datetime.today().strftime('%a')
elif sys.argv[1] == 'week':
period = 'week'
date_suffix = datetime.today().strftime('%U')
elif sys.argv[1] == 'month':
period = 'month'
date_suffix = datetime.today().strftime('%b')
else:
print('Please use the parameter day, week or month')
quit()
# Message to return result via SNS
message = ""
errmsg = ""
# Counters
total_creates = 0
total_copies = 0
total_deletes = 0
count_errors = 0
# List with snapshots to delete
deletelist = []
# Setup logging
logging.basicConfig(filename=config['log_file'], level=logging.INFO)
start_message = 'Started taking %(period)s snapshots at %(date)s' % {
'period': period,
'date': datetime.today().strftime('%d-%m-%Y %H:%M:%S')
}
message += start_message + "\n\n"
logging.info(start_message)
# Assume IAM role if defined.
def refresh_aws_credentials():
global refresh_time
if config['iam_role_arn']:
if time.time() - refresh_time < 2700:
return
sts = boto3.client('sts')
assumedRoleObject = sts.assume_role(RoleArn=config['iam_role_arn'], RoleSessionName='AssumeRoleSession1')
credentials = assumedRoleObject['Credentials']
config['aws_access_key'] = credentials['AccessKeyId']
config['aws_secret_key'] = credentials['SecretAccessKey']
config['security_token'] = credentials['SessionToken']
refresh_time = time.time()
else:
config['security_token'] = None
refresh_time = 0
refresh_aws_credentials()
# Get settings from config.py
ec2_region_name = config['ec2_region_name']
ec2_region_endpoint = config['ec2_region_endpoint']
sns_arn = config.get('arn')
proxyHost = config.get('proxyHost')
proxyPort = config.get('proxyPort')
copy_region_name = config.get('copy_region_name')
region = RegionInfo(name=ec2_region_name, endpoint=ec2_region_endpoint)
# Number of snapshots to keep
keep_week = config['keep_week']
keep_day = config['keep_day']
keep_month = config['keep_month']
count_success = 0
count_total = 0
# Connect to AWS using the credentials provided above or in Environment vars or using IAM role.
print('Connecting to AWS')
if proxyHost:
# proxy:
# using roles
if config['aws_access_key']:
conn = EC2Connection(config['aws_access_key'], config['aws_secret_key'], security_token=config['security_token'], region=region, proxy=proxyHost, proxy_port=proxyPort)
else:
conn = EC2Connection(region=region, proxy=proxyHost, proxy_port=proxyPort)
else:
# non proxy:
# using roles
if config['aws_access_key']:
conn = EC2Connection(config['aws_access_key'], config['aws_secret_key'], security_token=config['security_token'], region=region)
else:
conn = EC2Connection(region=region)
# Connect to SNS
if sns_arn:
print('Connecting to SNS')
if proxyHost:
# proxy:
# using roles:
if config['aws_access_key']:
sns = boto.sns.connect_to_region(ec2_region_name, aws_access_key_id=config['aws_access_key'], aws_secret_access_key=config['aws_secret_key'],
security_token=config['security_token'], proxy=proxyHost, proxy_port=proxyPort)
else:
sns = boto.sns.connect_to_region(ec2_region_name, proxy=proxyHost, proxy_port=proxyPort)
else:
# non proxy:
# using roles
if config['aws_access_key']:
sns = boto.sns.connect_to_region(ec2_region_name, aws_access_key_id=config['aws_access_key'], aws_secret_access_key=config['aws_secret_key'])
else:
sns = boto.sns.connect_to_region(ec2_region_name)
def get_resource_tags(resource_id):
resource_tags = {}
if resource_id:
tags = conn.get_all_tags({'resource-id': resource_id})
for tag in tags:
# Tags starting with 'aws:' are reserved for internal use
if not tag.name.startswith('aws:'):
resource_tags[tag.name] = tag.value
return resource_tags
def set_resource_tags(resource, tags):
for tag_key, tag_value in tags.items():
if tag_key not in resource.tags or resource.tags[tag_key] != tag_value:
print('Tagging %(resource_id)s with [%(tag_key)s: %(tag_value)s]' % {
'resource_id': resource.id,
'tag_key': tag_key,
'tag_value': tag_value
})
resource.add_tag(tag_key, tag_value)
# Get all the volumes that match the tag criteria
print('Finding volumes that match the requested tag ({ "tag:%(tag_name)s": "%(tag_value)s" })' % config)
vols = conn.get_all_volumes(filters={'tag:' + config['tag_name']: config['tag_value']})
credentials_refresh_time = time.time()
for vol in vols:
refresh_aws_credentials()
try:
count_total += 1
logging.info(vol)
tags_volume = get_resource_tags(vol.id)
description = '%(period)s_snapshot %(vol_id)s_%(period)s_%(date_suffix)s by snapshot script at %(date)s' % {
'period': period,
'vol_id': vol.id,
'date_suffix': date_suffix,
'date': datetime.today().strftime('%d-%m-%Y %H:%M:%S')
}
try:
current_snap = vol.create_snapshot(description)
set_resource_tags(current_snap, tags_volume)
suc_message = 'Snapshot created with description: %s and tags: %s' % (description, str(tags_volume))
print(' ' + suc_message)
logging.info(suc_message)
total_creates += 1
except Exception as e:
print("Unexpected error:", sys.exc_info()[0])
traceback.print_exc()
logging.error(e)
# Copy snapshot to another region.
if copy_region_name:
try:
# We need to wait until the snapshot is complete before shipping them
while True:
refresh_aws_credentials()
if current_snap.status == 'completed':
|
time.sleep(10)
current_snap.update(validate=True)
print(' ' + current_snap.status)
dest_conn = boto3.client(
'ec2',
region_name=copy_region_name,
aws_access_key_id=config['aws_access_key'],
aws_secret_access_key=config['aws_secret_key'],
aws_session_token=config['security_token']
)
copied_snap = dest_conn.copy_snapshot(SourceRegion=ec2_region_name, SourceSnapshotId=current_snap.id,
Description='[Copied from %s] %s' % (ec2_region_name, description))
dest_conn.create_tags(Resources=[copied_snap['SnapshotId']], Tags=[
{'Key': 'source_snapshot_id', 'Value': current_snap.id},
{'Key': 'Name', 'Value': tags_volume.get('Name') or current_snap.id}
])
suc_message = 'Snapshot copied to ' + copy_region_name
print(' ' + suc_message)
logging.info(suc_message)
total_copies += 1
except Exception as e:
print("Unexpected error:", sys.exc_info()[0])
traceback.print_exc()
logging.error(e)
snapshots = vol.snapshots()
deletelist = []
for snap in snapshots:
sndesc = snap.description
if sndesc.startswith('week_snapshot') and period == 'week':
deletelist.append(snap)
elif sndesc.startswith('day_snapshot') and period == 'day':
deletelist.append(snap)
elif sndesc.startswith('month_snapshot') and period == 'month':
deletelist.append(snap)
else:
logging.info(' Skipping, not added to deletelist: ' + sndesc)
for snap in deletelist:
logging.info(snap)
logging.info(snap.start_time)
def date_compare(snapshot):
return snapshot.start_time
deletelist.sort(key=date_compare)
if period == 'day':
keep = keep_day
elif period == 'week':
keep = keep_week
elif period == 'month':
keep = keep_month
delta = len(deletelist) - keep
for i in range(delta):
del_message = ' Deleting snapshot ' + deletelist[i]. | break | conditional_block |
parser.rs | it and
/// get back produced [`Event`]s.
#[derive(Clone)]
pub(super) struct Parser {
/// Number of bytes read from the source of data since the parser was created
pub offset: usize,
/// Defines how to process next byte
pub state: ParseState,
/// Expand empty element into an opening and closing element
pub expand_empty_elements: bool,
/// Trims leading whitespace in Text events, skip the element if text is empty
pub trim_text_start: bool,
/// Trims trailing whitespace in Text events.
pub trim_text_end: bool,
/// Trims trailing whitespaces from markup names in closing tags `</a >`
pub trim_markup_names_in_closing_tags: bool,
/// Check if [`Event::End`] nodes match last [`Event::Start`] node
pub check_end_names: bool,
/// Check if comments contain `--` (false by default)
pub check_comments: bool,
/// All currently Started elements which didn't have a matching
/// End element yet.
///
/// For an XML
///
/// ```xml
/// <root><one/><inner attr="value">|<tag></inner></root>
/// ```
/// when cursor at the `|` position buffer contains:
///
/// ```text
/// rootinner
/// ^ ^
/// ```
///
/// The `^` symbols show which positions are stored in the [`Self::opened_starts`]
/// (0 and 4 in that case).
opened_buffer: Vec<u8>,
/// Opened name start indexes into [`Self::opened_buffer`]. See documentation
/// for that field for details
opened_starts: Vec<usize>,
#[cfg(feature = "encoding")]
/// Reference to the encoding used to read an XML
pub encoding: EncodingRef,
}
impl Parser {
/// Trims whitespaces from `bytes`, if required, and returns a [`Text`] event.
///
/// # Parameters
/// - `bytes`: data from the start of stream to the first `<` or from `>` to `<`
///
/// [`Text`]: Event::Text
pub fn | <'b>(&mut self, bytes: &'b [u8]) -> Result<Event<'b>> {
let mut content = bytes;
if self.trim_text_end {
// Skip the ending '<'
let len = bytes
.iter()
.rposition(|&b| !is_whitespace(b))
.map_or_else(|| bytes.len(), |p| p + 1);
content = &bytes[..len];
}
Ok(Event::Text(BytesText::wrap(content, self.decoder())))
}
/// reads `BytesElement` starting with a `!`,
/// return `Comment`, `CData` or `DocType` event
pub fn emit_bang<'b>(&mut self, bang_type: BangType, buf: &'b [u8]) -> Result<Event<'b>> {
let uncased_starts_with = |string: &[u8], prefix: &[u8]| {
string.len() >= prefix.len() && string[..prefix.len()].eq_ignore_ascii_case(prefix)
};
let len = buf.len();
match bang_type {
BangType::Comment if buf.starts_with(b"!--") => {
debug_assert!(buf.ends_with(b"--"));
if self.check_comments {
// search if '--' not in comments
if let Some(p) = memchr::memchr_iter(b'-', &buf[3..len - 2])
.position(|p| buf[3 + p + 1] == b'-')
{
self.offset += len - p;
return Err(Error::UnexpectedToken("--".to_string()));
}
}
Ok(Event::Comment(BytesText::wrap(
&buf[3..len - 2],
self.decoder(),
)))
}
BangType::CData if uncased_starts_with(buf, b"![CDATA[") => {
debug_assert!(buf.ends_with(b"]]"));
Ok(Event::CData(BytesCData::wrap(
&buf[8..len - 2],
self.decoder(),
)))
}
BangType::DocType if uncased_starts_with(buf, b"!DOCTYPE") => {
let start = buf[8..]
.iter()
.position(|b| !is_whitespace(*b))
.unwrap_or(len - 8);
if start + 8 >= len {
return Err(Error::EmptyDocType);
}
Ok(Event::DocType(BytesText::wrap(
&buf[8 + start..],
self.decoder(),
)))
}
_ => Err(bang_type.to_err()),
}
}
/// Wraps content of `buf` into the [`Event::End`] event. Does the check that
/// end name matches the last opened start name if `self.check_end_names` is set.
pub fn emit_end<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
// XML standard permits whitespaces after the markup name in closing tags.
// Let's strip them from the buffer before comparing tag names.
let name = if self.trim_markup_names_in_closing_tags {
if let Some(pos_end_name) = buf[1..].iter().rposition(|&b| !b.is_ascii_whitespace()) {
let (name, _) = buf[1..].split_at(pos_end_name + 1);
name
} else {
&buf[1..]
}
} else {
&buf[1..]
};
let decoder = self.decoder();
let mismatch_err = |expected: String, found: &[u8], offset: &mut usize| {
*offset -= buf.len();
Err(Error::EndEventMismatch {
expected,
found: decoder.decode(found).unwrap_or_default().into_owned(),
})
};
// Get the index in self.opened_buffer of the name of the last opened tag
match self.opened_starts.pop() {
Some(start) => {
if self.check_end_names {
let expected = &self.opened_buffer[start..];
if name != expected {
let expected = decoder.decode(expected).unwrap_or_default().into_owned();
// #513: In order to allow error recovery we should drop content of the buffer
self.opened_buffer.truncate(start);
return mismatch_err(expected, name, &mut self.offset);
}
}
self.opened_buffer.truncate(start);
}
None => {
if self.check_end_names {
return mismatch_err("".to_string(), &buf[1..], &mut self.offset);
}
}
}
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// reads `BytesElement` starting with a `?`,
/// return `Decl` or `PI` event
pub fn emit_question_mark<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
let len = buf.len();
if len > 2 && buf[len - 1] == b'?' {
if len > 5 && &buf[1..4] == b"xml" && is_whitespace(buf[4]) {
let event = BytesDecl::from_start(BytesStart::wrap(&buf[1..len - 1], 3));
// Try getting encoding from the declaration event
#[cfg(feature = "encoding")]
if self.encoding.can_be_refined() {
if let Some(encoding) = event.encoder() {
self.encoding = EncodingRef::XmlDetected(encoding);
}
}
Ok(Event::Decl(event))
} else {
Ok(Event::PI(BytesText::wrap(&buf[1..len - 1], self.decoder())))
}
} else {
self.offset -= len;
Err(Error::UnexpectedEof("XmlDecl".to_string()))
}
}
/// Converts content of a tag to a `Start` or an `Empty` event
///
/// # Parameters
/// - `content`: Content of a tag between `<` and `>`
pub fn emit_start<'b>(&mut self, content: &'b [u8]) -> Result<Event<'b>> {
let len = content.len();
let name_end = content
.iter()
.position(|&b| is_whitespace(b))
.unwrap_or(len);
if let Some(&b'/') = content.last() {
// This is self-closed tag `<something/>`
let name_len = if name_end < len { name_end } else { len - 1 };
let event = BytesStart::wrap(&content[..len - 1], name_len);
if self.expand_empty_elements {
self.state = ParseState::Empty;
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_len]);
Ok(Event::Start(event))
} else {
Ok(Event::Empty(event))
}
} else {
// #514: Always store names event when .check_end_names == false,
// because checks can be temporary disabled and when they would be
// enabled, we should have that information
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_end]);
Ok(Event::Start(BytesStart::wrap(content, name_end)))
}
}
#[inline]
pub fn close_expanded_empty(&mut self) -> Result<Event<'static>> {
self.state | emit_text | identifier_name |
parser.rs | it and
/// get back produced [`Event`]s. | /// Defines how to process next byte
pub state: ParseState,
/// Expand empty element into an opening and closing element
pub expand_empty_elements: bool,
/// Trims leading whitespace in Text events, skip the element if text is empty
pub trim_text_start: bool,
/// Trims trailing whitespace in Text events.
pub trim_text_end: bool,
/// Trims trailing whitespaces from markup names in closing tags `</a >`
pub trim_markup_names_in_closing_tags: bool,
/// Check if [`Event::End`] nodes match last [`Event::Start`] node
pub check_end_names: bool,
/// Check if comments contain `--` (false by default)
pub check_comments: bool,
/// All currently Started elements which didn't have a matching
/// End element yet.
///
/// For an XML
///
/// ```xml
/// <root><one/><inner attr="value">|<tag></inner></root>
/// ```
/// when cursor at the `|` position buffer contains:
///
/// ```text
/// rootinner
/// ^ ^
/// ```
///
/// The `^` symbols show which positions are stored in the [`Self::opened_starts`]
/// (0 and 4 in that case).
opened_buffer: Vec<u8>,
/// Opened name start indexes into [`Self::opened_buffer`]. See documentation
/// for that field for details
opened_starts: Vec<usize>,
#[cfg(feature = "encoding")]
/// Reference to the encoding used to read an XML
pub encoding: EncodingRef,
}
impl Parser {
/// Trims whitespaces from `bytes`, if required, and returns a [`Text`] event.
///
/// # Parameters
/// - `bytes`: data from the start of stream to the first `<` or from `>` to `<`
///
/// [`Text`]: Event::Text
pub fn emit_text<'b>(&mut self, bytes: &'b [u8]) -> Result<Event<'b>> {
let mut content = bytes;
if self.trim_text_end {
// Skip the ending '<'
let len = bytes
.iter()
.rposition(|&b| !is_whitespace(b))
.map_or_else(|| bytes.len(), |p| p + 1);
content = &bytes[..len];
}
Ok(Event::Text(BytesText::wrap(content, self.decoder())))
}
/// reads `BytesElement` starting with a `!`,
/// return `Comment`, `CData` or `DocType` event
pub fn emit_bang<'b>(&mut self, bang_type: BangType, buf: &'b [u8]) -> Result<Event<'b>> {
let uncased_starts_with = |string: &[u8], prefix: &[u8]| {
string.len() >= prefix.len() && string[..prefix.len()].eq_ignore_ascii_case(prefix)
};
let len = buf.len();
match bang_type {
BangType::Comment if buf.starts_with(b"!--") => {
debug_assert!(buf.ends_with(b"--"));
if self.check_comments {
// search if '--' not in comments
if let Some(p) = memchr::memchr_iter(b'-', &buf[3..len - 2])
.position(|p| buf[3 + p + 1] == b'-')
{
self.offset += len - p;
return Err(Error::UnexpectedToken("--".to_string()));
}
}
Ok(Event::Comment(BytesText::wrap(
&buf[3..len - 2],
self.decoder(),
)))
}
BangType::CData if uncased_starts_with(buf, b"![CDATA[") => {
debug_assert!(buf.ends_with(b"]]"));
Ok(Event::CData(BytesCData::wrap(
&buf[8..len - 2],
self.decoder(),
)))
}
BangType::DocType if uncased_starts_with(buf, b"!DOCTYPE") => {
let start = buf[8..]
.iter()
.position(|b| !is_whitespace(*b))
.unwrap_or(len - 8);
if start + 8 >= len {
return Err(Error::EmptyDocType);
}
Ok(Event::DocType(BytesText::wrap(
&buf[8 + start..],
self.decoder(),
)))
}
_ => Err(bang_type.to_err()),
}
}
/// Wraps content of `buf` into the [`Event::End`] event. Does the check that
/// end name matches the last opened start name if `self.check_end_names` is set.
pub fn emit_end<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
// XML standard permits whitespaces after the markup name in closing tags.
// Let's strip them from the buffer before comparing tag names.
let name = if self.trim_markup_names_in_closing_tags {
if let Some(pos_end_name) = buf[1..].iter().rposition(|&b| !b.is_ascii_whitespace()) {
let (name, _) = buf[1..].split_at(pos_end_name + 1);
name
} else {
&buf[1..]
}
} else {
&buf[1..]
};
let decoder = self.decoder();
let mismatch_err = |expected: String, found: &[u8], offset: &mut usize| {
*offset -= buf.len();
Err(Error::EndEventMismatch {
expected,
found: decoder.decode(found).unwrap_or_default().into_owned(),
})
};
// Get the index in self.opened_buffer of the name of the last opened tag
match self.opened_starts.pop() {
Some(start) => {
if self.check_end_names {
let expected = &self.opened_buffer[start..];
if name != expected {
let expected = decoder.decode(expected).unwrap_or_default().into_owned();
// #513: In order to allow error recovery we should drop content of the buffer
self.opened_buffer.truncate(start);
return mismatch_err(expected, name, &mut self.offset);
}
}
self.opened_buffer.truncate(start);
}
None => {
if self.check_end_names {
return mismatch_err("".to_string(), &buf[1..], &mut self.offset);
}
}
}
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// reads `BytesElement` starting with a `?`,
/// return `Decl` or `PI` event
pub fn emit_question_mark<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
let len = buf.len();
if len > 2 && buf[len - 1] == b'?' {
if len > 5 && &buf[1..4] == b"xml" && is_whitespace(buf[4]) {
let event = BytesDecl::from_start(BytesStart::wrap(&buf[1..len - 1], 3));
// Try getting encoding from the declaration event
#[cfg(feature = "encoding")]
if self.encoding.can_be_refined() {
if let Some(encoding) = event.encoder() {
self.encoding = EncodingRef::XmlDetected(encoding);
}
}
Ok(Event::Decl(event))
} else {
Ok(Event::PI(BytesText::wrap(&buf[1..len - 1], self.decoder())))
}
} else {
self.offset -= len;
Err(Error::UnexpectedEof("XmlDecl".to_string()))
}
}
/// Converts content of a tag to a `Start` or an `Empty` event
///
/// # Parameters
/// - `content`: Content of a tag between `<` and `>`
pub fn emit_start<'b>(&mut self, content: &'b [u8]) -> Result<Event<'b>> {
let len = content.len();
let name_end = content
.iter()
.position(|&b| is_whitespace(b))
.unwrap_or(len);
if let Some(&b'/') = content.last() {
// This is self-closed tag `<something/>`
let name_len = if name_end < len { name_end } else { len - 1 };
let event = BytesStart::wrap(&content[..len - 1], name_len);
if self.expand_empty_elements {
self.state = ParseState::Empty;
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_len]);
Ok(Event::Start(event))
} else {
Ok(Event::Empty(event))
}
} else {
// #514: Always store names event when .check_end_names == false,
// because checks can be temporary disabled and when they would be
// enabled, we should have that information
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_end]);
Ok(Event::Start(BytesStart::wrap(content, name_end)))
}
}
#[inline]
pub fn close_expanded_empty(&mut self) -> Result<Event<'static>> {
self.state = | #[derive(Clone)]
pub(super) struct Parser {
/// Number of bytes read from the source of data since the parser was created
pub offset: usize, | random_line_split |
parser.rs | if [`Event::End`] nodes match last [`Event::Start`] node
pub check_end_names: bool,
/// Check if comments contain `--` (false by default)
pub check_comments: bool,
/// All currently Started elements which didn't have a matching
/// End element yet.
///
/// For an XML
///
/// ```xml
/// <root><one/><inner attr="value">|<tag></inner></root>
/// ```
/// when cursor at the `|` position buffer contains:
///
/// ```text
/// rootinner
/// ^ ^
/// ```
///
/// The `^` symbols show which positions are stored in the [`Self::opened_starts`]
/// (0 and 4 in that case).
opened_buffer: Vec<u8>,
/// Opened name start indexes into [`Self::opened_buffer`]. See documentation
/// for that field for details
opened_starts: Vec<usize>,
#[cfg(feature = "encoding")]
/// Reference to the encoding used to read an XML
pub encoding: EncodingRef,
}
impl Parser {
/// Trims whitespaces from `bytes`, if required, and returns a [`Text`] event.
///
/// # Parameters
/// - `bytes`: data from the start of stream to the first `<` or from `>` to `<`
///
/// [`Text`]: Event::Text
pub fn emit_text<'b>(&mut self, bytes: &'b [u8]) -> Result<Event<'b>> {
let mut content = bytes;
if self.trim_text_end {
// Skip the ending '<'
let len = bytes
.iter()
.rposition(|&b| !is_whitespace(b))
.map_or_else(|| bytes.len(), |p| p + 1);
content = &bytes[..len];
}
Ok(Event::Text(BytesText::wrap(content, self.decoder())))
}
/// reads `BytesElement` starting with a `!`,
/// return `Comment`, `CData` or `DocType` event
pub fn emit_bang<'b>(&mut self, bang_type: BangType, buf: &'b [u8]) -> Result<Event<'b>> {
let uncased_starts_with = |string: &[u8], prefix: &[u8]| {
string.len() >= prefix.len() && string[..prefix.len()].eq_ignore_ascii_case(prefix)
};
let len = buf.len();
match bang_type {
BangType::Comment if buf.starts_with(b"!--") => {
debug_assert!(buf.ends_with(b"--"));
if self.check_comments {
// search if '--' not in comments
if let Some(p) = memchr::memchr_iter(b'-', &buf[3..len - 2])
.position(|p| buf[3 + p + 1] == b'-')
{
self.offset += len - p;
return Err(Error::UnexpectedToken("--".to_string()));
}
}
Ok(Event::Comment(BytesText::wrap(
&buf[3..len - 2],
self.decoder(),
)))
}
BangType::CData if uncased_starts_with(buf, b"![CDATA[") => {
debug_assert!(buf.ends_with(b"]]"));
Ok(Event::CData(BytesCData::wrap(
&buf[8..len - 2],
self.decoder(),
)))
}
BangType::DocType if uncased_starts_with(buf, b"!DOCTYPE") => {
let start = buf[8..]
.iter()
.position(|b| !is_whitespace(*b))
.unwrap_or(len - 8);
if start + 8 >= len {
return Err(Error::EmptyDocType);
}
Ok(Event::DocType(BytesText::wrap(
&buf[8 + start..],
self.decoder(),
)))
}
_ => Err(bang_type.to_err()),
}
}
/// Wraps content of `buf` into the [`Event::End`] event. Does the check that
/// end name matches the last opened start name if `self.check_end_names` is set.
pub fn emit_end<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
// XML standard permits whitespaces after the markup name in closing tags.
// Let's strip them from the buffer before comparing tag names.
let name = if self.trim_markup_names_in_closing_tags {
if let Some(pos_end_name) = buf[1..].iter().rposition(|&b| !b.is_ascii_whitespace()) {
let (name, _) = buf[1..].split_at(pos_end_name + 1);
name
} else {
&buf[1..]
}
} else {
&buf[1..]
};
let decoder = self.decoder();
let mismatch_err = |expected: String, found: &[u8], offset: &mut usize| {
*offset -= buf.len();
Err(Error::EndEventMismatch {
expected,
found: decoder.decode(found).unwrap_or_default().into_owned(),
})
};
// Get the index in self.opened_buffer of the name of the last opened tag
match self.opened_starts.pop() {
Some(start) => {
if self.check_end_names {
let expected = &self.opened_buffer[start..];
if name != expected {
let expected = decoder.decode(expected).unwrap_or_default().into_owned();
// #513: In order to allow error recovery we should drop content of the buffer
self.opened_buffer.truncate(start);
return mismatch_err(expected, name, &mut self.offset);
}
}
self.opened_buffer.truncate(start);
}
None => {
if self.check_end_names {
return mismatch_err("".to_string(), &buf[1..], &mut self.offset);
}
}
}
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// reads `BytesElement` starting with a `?`,
/// return `Decl` or `PI` event
pub fn emit_question_mark<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
let len = buf.len();
if len > 2 && buf[len - 1] == b'?' {
if len > 5 && &buf[1..4] == b"xml" && is_whitespace(buf[4]) {
let event = BytesDecl::from_start(BytesStart::wrap(&buf[1..len - 1], 3));
// Try getting encoding from the declaration event
#[cfg(feature = "encoding")]
if self.encoding.can_be_refined() {
if let Some(encoding) = event.encoder() {
self.encoding = EncodingRef::XmlDetected(encoding);
}
}
Ok(Event::Decl(event))
} else {
Ok(Event::PI(BytesText::wrap(&buf[1..len - 1], self.decoder())))
}
} else {
self.offset -= len;
Err(Error::UnexpectedEof("XmlDecl".to_string()))
}
}
/// Converts content of a tag to a `Start` or an `Empty` event
///
/// # Parameters
/// - `content`: Content of a tag between `<` and `>`
pub fn emit_start<'b>(&mut self, content: &'b [u8]) -> Result<Event<'b>> {
let len = content.len();
let name_end = content
.iter()
.position(|&b| is_whitespace(b))
.unwrap_or(len);
if let Some(&b'/') = content.last() {
// This is self-closed tag `<something/>`
let name_len = if name_end < len { name_end } else { len - 1 };
let event = BytesStart::wrap(&content[..len - 1], name_len);
if self.expand_empty_elements {
self.state = ParseState::Empty;
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_len]);
Ok(Event::Start(event))
} else {
Ok(Event::Empty(event))
}
} else {
// #514: Always store names event when .check_end_names == false,
// because checks can be temporary disabled and when they would be
// enabled, we should have that information
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_end]);
Ok(Event::Start(BytesStart::wrap(content, name_end)))
}
}
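    // Illustrative behaviour (hypothetical input): `<img src="a"/>` normally
    // becomes a single `Event::Empty`, but with `expand_empty_elements` set the
    // same bytes yield `Event::Start` now and `Event::End` on the next call via
    // `close_expanded_empty`, which is why the name is pushed onto `opened_buffer`.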
#[inline]
pub fn close_expanded_empty(&mut self) -> Result<Event<'static>> {
self.state = ParseState::ClosedTag;
let name = self
.opened_buffer
.split_off(self.opened_starts.pop().unwrap());
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// Get the decoder, used to decode bytes, read by this reader, to the strings.
///
/// If `encoding` feature is enabled, the used encoding may change after
/// parsing the XML declaration, otherwise encoding is fixed to UTF-8.
///
/// If `encoding` feature is enabled and no encoding is specified in declaration,
/// defaults to UTF-8.
    pub fn decoder(&self) -> Decoder {
Decoder {
#[cfg(feature = "encoding")]
encoding: self.encoding.encoding(),
}
    }
parser.rs
/// A parser state machine; you can feed data into it and
/// get back produced [`Event`]s.
#[derive(Clone)]
pub(super) struct Parser {
/// Number of bytes read from the source of data since the parser was created
pub offset: usize,
/// Defines how to process next byte
pub state: ParseState,
/// Expand empty element into an opening and closing element
pub expand_empty_elements: bool,
/// Trims leading whitespace in Text events, skip the element if text is empty
pub trim_text_start: bool,
/// Trims trailing whitespace in Text events.
pub trim_text_end: bool,
/// Trims trailing whitespaces from markup names in closing tags `</a >`
pub trim_markup_names_in_closing_tags: bool,
/// Check if [`Event::End`] nodes match last [`Event::Start`] node
pub check_end_names: bool,
/// Check if comments contains `--` (false per default)
pub check_comments: bool,
/// All currently Started elements which didn't have a matching
/// End element yet.
///
/// For an XML
///
/// ```xml
/// <root><one/><inner attr="value">|<tag></inner></root>
/// ```
/// when cursor at the `|` position buffer contains:
///
/// ```text
/// rootinner
/// ^ ^
/// ```
///
/// The `^` symbols shows which positions stored in the [`Self::opened_starts`]
/// (0 and 4 in that case).
opened_buffer: Vec<u8>,
/// Opened name start indexes into [`Self::opened_buffer`]. See documentation
/// for that field for details
opened_starts: Vec<usize>,
#[cfg(feature = "encoding")]
/// Reference to the encoding used to read an XML
pub encoding: EncodingRef,
}
impl Parser {
/// Trims whitespaces from `bytes`, if required, and returns a [`Text`] event.
///
/// # Parameters
/// - `bytes`: data from the start of stream to the first `<` or from `>` to `<`
///
/// [`Text`]: Event::Text
pub fn emit_text<'b>(&mut self, bytes: &'b [u8]) -> Result<Event<'b>> {
let mut content = bytes;
if self.trim_text_end {
// Skip the ending '<'
let len = bytes
.iter()
.rposition(|&b| !is_whitespace(b))
.map_or_else(|| bytes.len(), |p| p + 1);
content = &bytes[..len];
}
Ok(Event::Text(BytesText::wrap(content, self.decoder())))
}
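    // Rough example (not from the test suite): with `trim_text_end` enabled, the
    // bytes `b"  hello  "` read before a `<` are emitted as `b"  hello"`; only
    // trailing whitespace is stripped here, while leading trimming is handled
    // separately when `trim_text_start` is set.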
/// reads `BytesElement` starting with a `!`,
/// return `Comment`, `CData` or `DocType` event
pub fn emit_bang<'b>(&mut self, bang_type: BangType, buf: &'b [u8]) -> Result<Event<'b>> {
let uncased_starts_with = |string: &[u8], prefix: &[u8]| {
string.len() >= prefix.len() && string[..prefix.len()].eq_ignore_ascii_case(prefix)
};
let len = buf.len();
match bang_type {
BangType::Comment if buf.starts_with(b"!--") => {
debug_assert!(buf.ends_with(b"--"));
if self.check_comments {
// search if '--' not in comments
if let Some(p) = memchr::memchr_iter(b'-', &buf[3..len - 2])
.position(|p| buf[3 + p + 1] == b'-')
{
self.offset += len - p;
return Err(Error::UnexpectedToken("--".to_string()));
}
}
Ok(Event::Comment(BytesText::wrap(
&buf[3..len - 2],
self.decoder(),
)))
}
BangType::CData if uncased_starts_with(buf, b"![CDATA[") => {
debug_assert!(buf.ends_with(b"]]"));
Ok(Event::CData(BytesCData::wrap(
&buf[8..len - 2],
self.decoder(),
)))
}
BangType::DocType if uncased_starts_with(buf, b"!DOCTYPE") => {
let start = buf[8..]
.iter()
.position(|b| !is_whitespace(*b))
.unwrap_or(len - 8);
if start + 8 >= len {
return Err(Error::EmptyDocType);
}
Ok(Event::DocType(BytesText::wrap(
&buf[8 + start..],
self.decoder(),
)))
}
_ => Err(bang_type.to_err()),
}
}
/// Wraps content of `buf` into the [`Event::End`] event. Does the check that
/// end name matches the last opened start name if `self.check_end_names` is set.
pub fn emit_end<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
// XML standard permits whitespaces after the markup name in closing tags.
// Let's strip them from the buffer before comparing tag names.
let name = if self.trim_markup_names_in_closing_tags {
if let Some(pos_end_name) = buf[1..].iter().rposition(|&b| !b.is_ascii_whitespace()) {
let (name, _) = buf[1..].split_at(pos_end_name + 1);
name
} else {
&buf[1..]
}
} else {
&buf[1..]
};
let decoder = self.decoder();
let mismatch_err = |expected: String, found: &[u8], offset: &mut usize| {
*offset -= buf.len();
Err(Error::EndEventMismatch {
expected,
found: decoder.decode(found).unwrap_or_default().into_owned(),
})
};
// Get the index in self.opened_buffer of the name of the last opened tag
match self.opened_starts.pop() {
Some(start) => {
if self.check_end_names {
let expected = &self.opened_buffer[start..];
if name != expected {
let expected = decoder.decode(expected).unwrap_or_default().into_owned();
// #513: In order to allow error recovery we should drop content of the buffer
self.opened_buffer.truncate(start);
return mismatch_err(expected, name, &mut self.offset);
}
}
self.opened_buffer.truncate(start);
}
None => {
if self.check_end_names {
return mismatch_err("".to_string(), &buf[1..], &mut self.offset);
}
}
}
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// reads `BytesElement` starting with a `?`,
/// return `Decl` or `PI` event
pub fn emit_question_mark<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
let len = buf.len();
if len > 2 && buf[len - 1] == b'?' {
if len > 5 && &buf[1..4] == b"xml" && is_whitespace(buf[4]) {
let event = BytesDecl::from_start(BytesStart::wrap(&buf[1..len - 1], 3));
// Try getting encoding from the declaration event
#[cfg(feature = "encoding")]
if self.encoding.can_be_refined() {
                if let Some(encoding) = event.encoder() {
                    self.encoding = EncodingRef::XmlDetected(encoding);
                }
}
Ok(Event::Decl(event))
} else {
Ok(Event::PI(BytesText::wrap(&buf[1..len - 1], self.decoder())))
}
} else {
self.offset -= len;
Err(Error::UnexpectedEof("XmlDecl".to_string()))
}
}
/// Converts content of a tag to a `Start` or an `Empty` event
///
/// # Parameters
/// - `content`: Content of a tag between `<` and `>`
pub fn emit_start<'b>(&mut self, content: &'b [u8]) -> Result<Event<'b>> {
let len = content.len();
let name_end = content
.iter()
.position(|&b| is_whitespace(b))
.unwrap_or(len);
if let Some(&b'/') = content.last() {
// This is self-closed tag `<something/>`
let name_len = if name_end < len { name_end } else { len - 1 };
let event = BytesStart::wrap(&content[..len - 1], name_len);
if self.expand_empty_elements {
self.state = ParseState::Empty;
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_len]);
Ok(Event::Start(event))
} else {
Ok(Event::Empty(event))
}
} else {
// #514: Always store names event when .check_end_names == false,
// because checks can be temporary disabled and when they would be
// enabled, we should have that information
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_end]);
Ok(Event::Start(BytesStart::wrap(content, name_end)))
}
}
#[inline]
pub fn close_expanded_empty(&mut self) -> Result<Event<'static>> {
        self.state = ParseState::ClosedTag;
        let name = self
            .opened_buffer
            .split_off(self.opened_starts.pop().unwrap());
        Ok(Event::End(BytesEnd::wrap(name.into())))
    }
api_consumer_mesh_suite.go
	grpcServer *grpc.Server
grpcListener net.Listener
serviceToken string
testService *namingpb.Service
}
// GetName returns the name of the test suite
func (t *ConsumerMeshTestingSuite) GetName() string {
return "Consumer"
}
// SetUpSuite starts the test suite
func (t *ConsumerMeshTestingSuite) SetUpSuite(c *check.C) {
grpcOptions := make([]grpc.ServerOption, 0)
maxStreams := 100000
grpcOptions = append(grpcOptions, grpc.MaxConcurrentStreams(uint32(maxStreams)))
log.Printf("ConsumerMeshTestingSuite SetUpSuite")
// get the grpc server wired up
grpc.EnableTracing = true
ipAddr := consumerIPAddress
shopPort := consumerPort
var err error
t.grpcServer = grpc.NewServer(grpcOptions...)
t.serviceToken = uuid.New().String()
t.mockServer = mock.NewNamingServer()
token := t.mockServer.RegisterServerService(config.ServerDiscoverService)
t.mockServer.RegisterServerInstance(ipAddr, shopPort, config.ServerDiscoverService, token, true)
t.mockServer.RegisterNamespace(&namingpb.Namespace{
Name: &wrappers.StringValue{Value: consumerNamespace},
Comment: &wrappers.StringValue{Value: "for consumer api test"},
Owners: &wrappers.StringValue{Value: "ConsumerAPI"},
})
t.mockServer.RegisterServerServices(ipAddr, shopPort)
t.testService = &namingpb.Service{
Name: &wrappers.StringValue{Value: consumerService},
Namespace: &wrappers.StringValue{Value: consumerNamespace},
Token: &wrappers.StringValue{Value: t.serviceToken},
}
t.mockServer.RegisterService(t.testService)
t.mockServer.GenTestInstances(t.testService, normalInstances)
t.mockServer.GenInstancesWithStatus(t.testService, isolatedInstances, mock.IsolatedStatus, 2048)
t.mockServer.GenInstancesWithStatus(t.testService, unhealthyInstances, mock.UnhealthyStatus, 4096)
namingpb.RegisterPolarisGRPCServer(t.grpcServer, t.mockServer)
t.grpcListener, err = net.Listen("tcp", fmt.Sprintf("%s:%d", ipAddr, shopPort))
if nil != err {
log.Fatal(fmt.Sprintf("error listening appserver %v", err))
}
log.Printf("appserver listening on %s:%d\n", ipAddr, shopPort)
go func() {
t.grpcServer.Serve(t.grpcListener)
}()
}
// TearDownSuite shuts the test suite down
func (t *ConsumerMeshTestingSuite) TearDownSuite(c *check.C) {
t.grpcServer.Stop()
util.InsertLog(t, c.GetTestLog())
}
// TestGetServices tests fetching services in batch
func (t *ConsumerMeshTestingSuite) TestGetServices(c *check.C) {
log.Printf("Start TestGetServices")
defer util.DeleteDir(util.BackupDir)
t.runWithMockTimeout(false, func() {
sdkContext, err := api.InitContextByFile("testdata/consumer.yaml")
c.Assert(err, check.IsNil)
consumer := api.NewConsumerAPIByContext(sdkContext)
defer consumer.Destroy()
time.Sleep(2 * time.Second)
testbus := "ExistBusiness"
		// target service
serviceToken1 := uuid.New().String()
testService1 := &namingpb.Service{
Name: &wrappers.StringValue{Value: testbus + "2222/" + api.MeshVirtualService},
Namespace: &wrappers.StringValue{Value: consumerNamespace},
Token: &wrappers.StringValue{Value: serviceToken1},
Business: &wrappers.StringValue{Value: testbus},
}
t.mockServer.RegisterService(testService1)
		// auxiliary service
serviceToken2 := uuid.New().String()
testService2 := &namingpb.Service{
Name: &wrappers.StringValue{Value: "BUSINESS/" + testbus},
Namespace: &wrappers.StringValue{Value: consumerNamespace},
Token: &wrappers.StringValue{Value: serviceToken2},
//Business: &wrappers.StringValue{Value: testbus},
}
t.mockServer.RegisterService(testService2)
request := &api.GetServicesRequest{}
request.FlowID = 1111
request.Namespace = consumerNamespace
request.Business = testbus
request.EnableBusiness = true
startTime := time.Now()
resp, err := consumer.GetServicesByBusiness(request)
endTime := time.Now()
consumeTime := endTime.Sub(startTime)
fmt.Printf("time consume is %v\n", consumeTime)
if nil != err {
fmt.Printf("err recv is %v\n", err)
}
c.Assert(err, check.IsNil)
servicesRecived := resp.GetValue().([]*namingpb.Service)
c.Assert(len(servicesRecived), check.Equals, 1)
log.Printf("TestGetServices done", resp, len(servicesRecived), servicesRecived)
//add one
serviceToken1 = uuid.New().String()
testService1 = &namingpb.Service{
Name: &wrappers.StringValue{Value: testbus + "2222/" + api.MeshVirtualService},
Namespace: &wrappers.StringValue{Value: consumerNamespace},
Token: &wrappers.StringValue{Value: serviceToken1},
Business: &wrappers.StringValue{Value: testbus},
}
t.mockServer.RegisterService(testService1)
time.Sleep(4 * time.Second)
resp, err = consumer.GetServicesByBusiness(request)
if nil != err {
fmt.Printf("err recv is %v\n", err)
}
c.Assert(err, check.IsNil)
servicesRecived = resp.GetValue().([]*namingpb.Service)
c.Assert(len(servicesRecived), check.Equals, 2)
log.Printf("TestGetServices done", resp, len(servicesRecived))
time.Sleep(2 * time.Second)
})
}
// TestGetMesh tests fetching mesh data
func (t *ConsumerMeshTestingSuite) TestGetMesh(c *check.C) {
log.Printf("Start TestGetMesh")
meshname := "mesh001"
//service
serviceToken1 := uuid.New().String()
testService1 := &namingpb.Service{
Name: &wrappers.StringValue{Value: meshname},
Namespace: &wrappers.StringValue{Value: ""},
Token: &wrappers.StringValue{Value: serviceToken1},
}
t.mockServer.RegisterService(testService1)
//mesh
t.mockServer.RegisterMesh(&namingpb.Service{ | Services: []*namingpb.MeshService{
{
MeshName: &wrappers.StringValue{Value: meshname},
Service: &wrappers.StringValue{Value: "n"},
Namespace: &wrappers.StringValue{Value: "space"},
},
},
})
//request
mreq := &api.GetMeshRequest{}
//mreq.Namespace = "mesh"
//mreq.Namespace = ""
mreq.MeshId = meshname
sdkContext, err := api.InitContextByFile("testdata/consumer.yaml")
c.Assert(err, check.IsNil)
consumer := api.NewConsumerAPIByContext(sdkContext)
defer consumer.Destroy()
time.Sleep(2 * time.Second)
resp, err := consumer.GetMesh(mreq)
log.Printf("======>", resp, resp.Value.(*namingpb.Mesh).Owners)
//
serviceToken11 := uuid.New().String()
testService11 := &namingpb.Service{
Name: &wrappers.StringValue{Value: meshname},
Namespace: &wrappers.StringValue{Value: "d"},
Token: &wrappers.StringValue{Value: serviceToken11},
}
t.mockServer.RegisterService(testService11)
req := &api.GetInstancesRequest{}
req.Namespace = "dd"
req.Service = meshname
resp2, err2 := consumer.GetInstances(req)
log.Printf("instances: ", resp2, err2)
}
// TestGetMeshConfig tests fetching mesh config rules
func (t *ConsumerMeshTestingSuite) TestGetMeshConfig(c *check.C) {
log.Printf("Start TestGetMeshConfig")
t.testGetMeshConfig(c, false, true, true)
}
// TestGetMeshConfigNotExist tests fetching a mesh config that does not exist
func (t *ConsumerMeshTestingSuite) TestGetMeshConfigNotExist(c *check.C) {
log.Printf("Start TestGetMeshConfigNotExist")
t.testGetMeshConfig(c, false, false, true)
}
// TestGetMeshConfigTypeNotMatch tests fetching a mesh config whose type does not match
func (t *ConsumerMeshTestingSuite) TestGetMeshConfigTypeNotMatch(c *check.C) {
log.Printf("Start TestGetMeshConfigTypeNotMatch")
t.testGetMeshConfig(c, false, true, false)
}
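// Reading the three call sites above, the boolean flags passed to
// testGetMeshConfig appear to mean (mockTimeout, ruleExists, typeMatches);
// the helper body is not visible here, so treat that reading as an assumption
// rather than its documented contract.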
// runWithMockTimeout runs the test logic with discovery timeouts mocked on or off
func (t *ConsumerMeshTestingSuite) runWithMockTimeout(mockTimeout bool, handle func()) {
t.mockServer.MakeOperationTimeout(mock.OperationDiscoverInstance, mockTimeout)
t.mockServer.MakeOperationTimeout(mock.OperationDiscoverRouting, mockTimeout)
defer func() {
		defer t.mockServer.MakeOperationTimeout(mock.OperationDiscoverInstance,
context.rs
    pub fn def(&self, id: CtId) -> Option<&Arc<CtDef>> {
self.ct_defs
.get(&id)
.filter(|cdef| cdef.is_populated())
.map(|cdef| &cdef.def)
}
pub fn defs(&self) -> impl Iterator<Item = (CtId, &Arc<CtDef>)> {
self.ct_defs
.iter()
.filter(|(_, cdef)| cdef.is_populated())
.map(|(id, cdef)| (*id, &cdef.def))
}
pub fn populate<T>(
&mut self,
src: &T,
set: &impl ModuleSet,
) -> (T::Dest, impl Iterator<Item = (CtId, &Arc<CtDef>)>)
where
T: simplifier::Simplify,
T::Dest: traverser::Traverse + rewriter::Rewrite,
{
let mut dest = simplifier::simplify(src, &mut SimplifierContext::new(self, set));
normalizer::normalize(&mut dest, self);
let generation = self.next_generation;
self.next_generation.0 += 1;
// Assign generation information to each CtDef generated in this populate pass.
let mut generation_defs = GenerationCollector::collect(&dest, self);
for id in generation_defs.iter() {
self.ct_defs.get_mut(id).unwrap().generation = Some(generation);
}
DataExpansionComputetor::compute(generation, &mut generation_defs, self);
let mut target = CanonicalizeTarget::new(&generation_defs, &mut dest, &mut self.ct_defs);
data_expander::expand(&mut target, &self.data_expansions);
branch_expander::expand(
&mut target,
&mut MatchExpander::new(&mut self.rt_id_gen, &self.data_expansions),
);
heap2stack::run(&mut target);
// Possible optimizations that are not implemented:
// * Closure inlining: we can inline closure immediate calls like $<f>{<env>}(..)
// * More escape analysis to promote heap allocations to stack allocations
let ct_defs = &self.ct_defs;
let related_defs =
generation_defs
.into_iter()
.filter_map(move |id| match ct_defs.get(&id).unwrap() {
cdef if cdef.is_populated() => Some((id, &cdef.def)),
_ => None,
});
(dest, related_defs)
}
fn bind_ct(&mut self, key: CtKey, build: impl FnOnce(&mut Self) -> Option<CtDef>) -> CtId {
match self.ct_mapping.entry(key) {
hash_map::Entry::Occupied(e) => *e.get(),
hash_map::Entry::Vacant(e) => {
let id = self.ct_id_gen.next();
e.insert(id);
if let Some(def) = build(self) {
let phase = match def {
CtDef::Generic(_, _) => Phase::Generalized,
_ => Phase::Instantiated,
};
self.ct_defs
.insert(id, ContextCtDef::new(phase, None, Arc::new(def)));
}
id
}
}
}
}
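// Note on bind_ct above: ct_mapping acts as a memoisation table keyed by
// CtKey, so a given construct or (id, args) instantiation is built at most
// once and later lookups get the cached CtId back. Illustrative only (not a
// test from this crate): calling issue_ct twice with the same construct on a
// SimplifierContext should therefore return equal ids.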
impl normalizer::Env for Context {
fn instantiate(&mut self, id: CtId, args: Vec<Ct>) -> CtId {
self.bind_ct(CtKey::Inst(id, args.clone()), move |self_| {
let def = match self_.ct_defs.get(&id) {
Some(cdef) => &cdef.def,
None => panic!("Attempt to instantiate {}: which is not a definition", id),
};
match def.as_ref() {
CtDef::Generic(params, ct) => {
assert_eq!(params.len(), args.len());
let mut ct = ct.as_ref().clone();
rewriter::replace_ct(&mut ct, params.iter().copied().zip(args).collect());
Some(ct)
}
_ => panic!(
"Attempt to instantiate {}: which is not a generic definition",
id
),
}
})
}
fn get_processing_ct_def(&mut self, id: CtId) -> Option<normalizer::ProcessingCtDef> {
let cdef = self.ct_defs.get(&id)?;
let mut def = Arc::clone(&cdef.def);
let is_normalized = match cdef.phase {
Phase::Generalized => false,
Phase::Instantiated => {
self.ct_defs.get_mut(&id).unwrap().phase = Phase::Normalizing;
def = Arc::new({
let mut def = def.as_ref().clone();
normalizer::normalize(&mut def, self);
def
});
self.ct_defs.get_mut(&id).unwrap().phase = Phase::Normalized;
self.ct_defs.get_mut(&id).unwrap().def = Arc::clone(&def);
true
}
Phase::Normalizing => false,
Phase::Normalized => true,
};
Some(normalizer::ProcessingCtDef { is_normalized, def })
}
fn alloc_ct(&mut self) -> CtId {
self.ct_id_gen.next()
}
fn define_ct(&mut self, id: CtId, def: CtDef) {
let phase = match def {
CtDef::Generic(_, _) => Phase::Generalized,
_ => Phase::Instantiated,
};
let def = ContextCtDef::new(phase, None, Arc::new(def));
if self.ct_defs.insert(id, def).is_some() {
panic!("Duplicate definition of {}", id);
}
}
fn alloc_rt(&mut self) -> RtId {
self.rt_id_gen.next()
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Hash)]
enum CtKey {
Construct(ast::Construct),
Inst(CtId, Vec<Ct>),
}
type RtKey = ast::Construct;
#[derive(Debug, Clone, new)]
struct ContextCtDef {
phase: Phase,
generation: Option<Generation>,
def: Arc<CtDef>,
}
impl ContextCtDef {
fn is_populated(&self) -> bool {
self.phase == Phase::Normalized
&& self.generation.is_some()
&& !matches!(*self.def, CtDef::Data(_))
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)]
enum Phase {
Generalized,
Instantiated,
Normalizing,
Normalized,
}
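// Lifecycle, as inferred from get_processing_ct_def and is_populated: a def
// starts as Generalized (still generic) or Instantiated, is marked Normalizing
// while the normalizer runs on it, and becomes Normalized afterwards; only
// Normalized defs that also carry a generation (and are not CtDef::Data) are
// exposed through def()/defs().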
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)]
struct Generation(usize);
#[derive(Debug, new)]
struct SimplifierContext<'a, 'm, M> {
context: &'a mut Context,
module_set: &'m M,
}
impl<'a, 'm, M: ModuleSet> simplifier::Env<'m> for SimplifierContext<'a, 'm, M> {
type ModuleSet = M;
fn module_set(&self) -> &'m Self::ModuleSet {
self.module_set
}
fn alloc_ct(&mut self) -> CtId {
self.context.ct_id_gen.next()
}
fn issue_ct(&mut self, construct: impl Into<ast::Construct>) -> CtId {
let module_set = self.module_set;
let construct = construct.into();
self.context.bind_ct(CtKey::Construct(construct), |ctx| {
SimplifierContext::new(ctx, module_set).simplify_def(construct)
})
}
fn alloc_rt(&mut self) -> RtId {
self.context.rt_id_gen.next()
}
fn issue_rt(&mut self, construct: impl Into<ast::Construct>) -> RtId {
match self.context.rt_mapping.entry(construct.into()) {
hash_map::Entry::Occupied(e) => *e.get(),
hash_map::Entry::Vacant(e) => {
let id = self.context.rt_id_gen.next();
e.insert(id);
id
}
}
}
}
#[derive(Debug, new)]
struct DataExpansionComputetor<'a> {
ct_id_gen: &'a mut CtIdGen,
data_expansions: &'a mut HashMap<CtId, data_expander::DataExpansion>,
#[new(default)]
defs: HashMap<CtId, CtDef>,
}
impl<'a> DataExpansionComputetor<'a> {
fn compute(generation: Generation, generation_defs: &mut BTreeSet<CtId>, ctx: &'a mut Context) {
let mut env = Self::new(&mut ctx.ct_id_gen, &mut ctx.data_expansions);
data_expander::compute(
{
let ct_defs = &ctx.ct_defs;
generation_defs
.iter()
.filter_map(move |id| match *ct_defs.get(id).unwrap().def {
CtDef::Data(ref data) => Some((*id, data)),
_ => None,
})
},
&mut env,
);
for (id, def) in env.defs {
let cdef = ContextCtDef::new(Phase::Normalized, Some(generation), Arc::new(def));
ctx.ct_defs.insert(id, cdef);
generation_defs.insert(id);
}
}
}
impl<'a> data_expander::Env for DataExpansionComputetor<'a> {
fn add_def(&mut self, def: CtDef) -> CtId {
let id = self.ct_id_gen.next();
self.defs.insert(id, def);
id
    }
context.rs
            data_expansions: HashMap::new(),
next_generation: Generation(0),
}
}
pub fn def(&self, id: CtId) -> Option<&Arc<CtDef>> {
self.ct_defs
.get(&id)
.filter(|cdef| cdef.is_populated())
.map(|cdef| &cdef.def)
}
pub fn defs(&self) -> impl Iterator<Item = (CtId, &Arc<CtDef>)> {
self.ct_defs
.iter()
.filter(|(_, cdef)| cdef.is_populated())
.map(|(id, cdef)| (*id, &cdef.def))
}
pub fn populate<T>(
&mut self,
src: &T,
set: &impl ModuleSet,
) -> (T::Dest, impl Iterator<Item = (CtId, &Arc<CtDef>)>)
where
T: simplifier::Simplify,
T::Dest: traverser::Traverse + rewriter::Rewrite,
{
let mut dest = simplifier::simplify(src, &mut SimplifierContext::new(self, set));
normalizer::normalize(&mut dest, self);
let generation = self.next_generation;
self.next_generation.0 += 1;
// Assign generation information to each CtDef generated in this populate pass.
let mut generation_defs = GenerationCollector::collect(&dest, self);
for id in generation_defs.iter() {
self.ct_defs.get_mut(id).unwrap().generation = Some(generation);
}
DataExpansionComputetor::compute(generation, &mut generation_defs, self);
let mut target = CanonicalizeTarget::new(&generation_defs, &mut dest, &mut self.ct_defs);
data_expander::expand(&mut target, &self.data_expansions);
branch_expander::expand(
&mut target,
&mut MatchExpander::new(&mut self.rt_id_gen, &self.data_expansions),
);
heap2stack::run(&mut target);
// Possible optimizations that are not implemented:
// * Closure inlining: we can inline closure immediate calls like $<f>{<env>}(..)
// * More escape analysis to promote heap allocations to stack allocations
let ct_defs = &self.ct_defs;
let related_defs =
generation_defs
.into_iter()
.filter_map(move |id| match ct_defs.get(&id).unwrap() {
cdef if cdef.is_populated() => Some((id, &cdef.def)),
_ => None,
});
(dest, related_defs)
}
fn bind_ct(&mut self, key: CtKey, build: impl FnOnce(&mut Self) -> Option<CtDef>) -> CtId {
match self.ct_mapping.entry(key) {
hash_map::Entry::Occupied(e) => *e.get(),
hash_map::Entry::Vacant(e) => {
let id = self.ct_id_gen.next();
e.insert(id);
if let Some(def) = build(self) {
let phase = match def {
CtDef::Generic(_, _) => Phase::Generalized,
_ => Phase::Instantiated,
};
self.ct_defs
.insert(id, ContextCtDef::new(phase, None, Arc::new(def)));
}
id
}
}
}
}
impl normalizer::Env for Context {
fn instantiate(&mut self, id: CtId, args: Vec<Ct>) -> CtId {
self.bind_ct(CtKey::Inst(id, args.clone()), move |self_| {
let def = match self_.ct_defs.get(&id) {
Some(cdef) => &cdef.def,
None => panic!("Attempt to instantiate {}: which is not a definition", id),
};
match def.as_ref() {
CtDef::Generic(params, ct) => {
assert_eq!(params.len(), args.len());
let mut ct = ct.as_ref().clone();
rewriter::replace_ct(&mut ct, params.iter().copied().zip(args).collect());
Some(ct)
}
_ => panic!(
"Attempt to instantiate {}: which is not a generic definition",
id
),
}
})
}
fn get_processing_ct_def(&mut self, id: CtId) -> Option<normalizer::ProcessingCtDef> {
let cdef = self.ct_defs.get(&id)?;
let mut def = Arc::clone(&cdef.def);
let is_normalized = match cdef.phase {
Phase::Generalized => false,
Phase::Instantiated => {
self.ct_defs.get_mut(&id).unwrap().phase = Phase::Normalizing;
def = Arc::new({
let mut def = def.as_ref().clone();
normalizer::normalize(&mut def, self);
def
});
self.ct_defs.get_mut(&id).unwrap().phase = Phase::Normalized;
self.ct_defs.get_mut(&id).unwrap().def = Arc::clone(&def);
true
}
Phase::Normalizing => false,
Phase::Normalized => true,
};
Some(normalizer::ProcessingCtDef { is_normalized, def })
}
fn alloc_ct(&mut self) -> CtId {
self.ct_id_gen.next()
}
fn define_ct(&mut self, id: CtId, def: CtDef) {
let phase = match def {
CtDef::Generic(_, _) => Phase::Generalized,
_ => Phase::Instantiated,
};
let def = ContextCtDef::new(phase, None, Arc::new(def));
if self.ct_defs.insert(id, def).is_some() {
panic!("Duplicate definition of {}", id);
}
}
fn alloc_rt(&mut self) -> RtId {
self.rt_id_gen.next()
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Hash)]
enum CtKey {
Construct(ast::Construct),
Inst(CtId, Vec<Ct>),
}
type RtKey = ast::Construct;
#[derive(Debug, Clone, new)]
struct ContextCtDef {
phase: Phase,
generation: Option<Generation>,
def: Arc<CtDef>,
}
impl ContextCtDef {
fn is_populated(&self) -> bool {
self.phase == Phase::Normalized
&& self.generation.is_some()
&& !matches!(*self.def, CtDef::Data(_))
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)]
enum Phase {
Generalized,
Instantiated,
Normalizing,
Normalized,
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)]
struct Generation(usize);
#[derive(Debug, new)]
struct SimplifierContext<'a, 'm, M> {
context: &'a mut Context,
module_set: &'m M,
}
impl<'a, 'm, M: ModuleSet> simplifier::Env<'m> for SimplifierContext<'a, 'm, M> {
type ModuleSet = M;
fn module_set(&self) -> &'m Self::ModuleSet {
self.module_set
}
fn alloc_ct(&mut self) -> CtId {
self.context.ct_id_gen.next()
}
fn issue_ct(&mut self, construct: impl Into<ast::Construct>) -> CtId {
let module_set = self.module_set;
let construct = construct.into();
self.context.bind_ct(CtKey::Construct(construct), |ctx| {
SimplifierContext::new(ctx, module_set).simplify_def(construct)
})
}
fn alloc_rt(&mut self) -> RtId {
self.context.rt_id_gen.next()
}
fn issue_rt(&mut self, construct: impl Into<ast::Construct>) -> RtId {
match self.context.rt_mapping.entry(construct.into()) {
hash_map::Entry::Occupied(e) => *e.get(),
hash_map::Entry::Vacant(e) => {
let id = self.context.rt_id_gen.next();
e.insert(id);
id
}
}
}
}
#[derive(Debug, new)]
struct DataExpansionComputetor<'a> {
ct_id_gen: &'a mut CtIdGen,
data_expansions: &'a mut HashMap<CtId, data_expander::DataExpansion>,
#[new(default)]
defs: HashMap<CtId, CtDef>,
}
impl<'a> DataExpansionComputetor<'a> {
fn compute(generation: Generation, generation_defs: &mut BTreeSet<CtId>, ctx: &'a mut Context) {
let mut env = Self::new(&mut ctx.ct_id_gen, &mut ctx.data_expansions);
data_expander::compute(
{
let ct_defs = &ctx.ct_defs;
generation_defs
.iter()
.filter_map(move |id| match *ct_defs.get(id).unwrap().def {
CtDef::Data(ref data) => Some((*id, data)),
_ => None,
})
},
&mut env,
);
for (id, def) in env.defs {
let cdef = ContextCtDef::new(Phase::Normalized, Some(generation), Arc::new(def));
ctx.ct_defs.insert(id, cdef);
generation_defs.insert(id);
}
}
}
impl<'a> data_expander::Env for DataExpansionComputetor<'a> {
fn | add_def | identifier_name |
|
switching_jobmanager.py
import redis
import simplejson
import time
import operator
from cryptokit import bits_to_difficulty
from gevent.event import Event
from powerpool.lib import loop
from powerpool.jobmanagers import Jobmanager
from binascii import hexlify
class MonitorNetworkMulti(Jobmanager):
def __init__(self, config):
self._configure(config)
super(Jobmanager, self).__init__()
# Since some MonitorNetwork objs are polling and some aren't....
self.gl_methods = ['update_profit']
# Child jobmanagers
self.jobmanagers = {}
self.price_data = {}
self.profit_data = {}
self.next_network = None
self.current_network = None
# Currently active jobs keyed by their unique ID
self.jobs = {}
self.new_job = Event()
self.redis = redis.Redis(**self.config['redis'])
@property
def latest_job(self):
""" Proxy the jobmanager we're currently mining ons job """
return self.jobmanagers[self.current_network].latest_job
@property
def status(self):
""" For display in the http monitor """
return dict(price_data=self.price_data,
profit_data=self.profit_data,
next_network=self.next_network,
current_network=self.current_network)
@loop(interval='profit_poll_int')
def update_profit(self):
""" Continually check redis for new profit information """
        # Accessing Redis can cause greenlet switches because of new jobs. We don't
# want to potentially switch jobs multiple times quickly, so we update
# the profitability information all at once after the loop to avoid
# multiple network switches
new_price_data = {}
for manager in self.jobmanagers.itervalues():
currency = manager.config['currency']
pscore = self.redis.get("{}_profit".format(currency))
# Deserialize
if pscore:
try:
pscore = simplejson.loads(pscore, use_decimal=True)
except Exception:
self.logger.warn(
"Error parsing profit score for {}! Setting it to 0.."
.format(currency))
pscore = 0
pass
# If no score was grabbed, pass a 0 value score
else:
self.logger.warn("Unable to grab profit info for {}!"
.format(currency))
pscore = 0
ratio = self.redis.get("{}_ratio".format(currency)) or 1.0
ratio = float(ratio)
# Only set updated if it actually changed
if self.price_data[currency][0] != pscore or self.price_data[currency][1] != ratio:
new_price_data[currency] = (pscore, ratio, time.time())
# If we have some new information, adjust accordingly
if new_price_data:
self.logger.info("Updated price information for {}"
.format(new_price_data.keys()))
# Atomic update in gevent
self.price_data.update(new_price_data)
# Update all the profit info. No preemption, just maths
for currency in self.jobmanagers.iterkeys():
self.update_profitability(currency)
self.logger.debug(
"Re-checking best network after new price data for {}"
.format(new_price_data.keys()))
self.check_best()
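        # For reference, the "<currency>_profit" and "<currency>_ratio" keys read
        # above are plain Redis strings. A separate price daemon (hypothetical
        # here, not part of this module) could publish them with something like:
        #   r = redis.Redis()
        #   r.set("LTC_profit", simplejson.dumps(Decimal("0.0021")))
        #   r.set("LTC_ratio", "1.0")
        # update_profit() only recomputes profitability when one of them changes.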
def check_best(self):
""" Assuming that `profit_data` is completely up to date, evaluate the
most profitable network and switch immediately if there's a big enough
difference. Otherwise set it to be changed at next block notification.
"""
# Get the most profitable network based on our current data
new_best = max(self.profit_data.iteritems(),
key=operator.itemgetter(1))[0]
if self.current_network is None:
self.logger.info(
"No active network, so switching to {} with profit of {:,.4f}"
.format(new_best, self.profit_data[new_best]))
self.next_network = new_best
self.switch_network()
return
# If the currently most profitable network is 120% the profitability
# of what we're mining on, we should switch immediately
margin_switch = self.config['margin_switch']
if (margin_switch and
self.profit_data[self.next_network] >
(self.profit_data[self.current_network] * margin_switch)):
self.logger.info(
"Network {} {:,.4f} now more profitable than current network "
"{} {:,.4f} by a fair margin. Switching NOW."
.format(new_best, self.profit_data[new_best], self.current_network,
self.profit_data[self.current_network]))
self.next_network = new_best
self.switch_network()
return
if new_best != self.next_network:
self.logger.info(
"Network {} {:,.4f} now more profitable than current best "
"{} {:,.4f}. Switching on next block from current network {}."
.format(new_best, self.profit_data[new_best], self.next_network,
self.profit_data[self.next_network], self.current_network))
self.next_network = new_best
return
self.logger.debug("Network {} {:,.4f} still most profitable"
.format(new_best, self.profit_data[new_best]))
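        # Worked example with made-up numbers: with margin_switch = 1.2, a current
        # network scoring 0.010 keeps its jobs unless another network exceeds
        # 0.012, in which case we switch immediately; a smaller lead only updates
        # next_network and the switch happens at the next block notification.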
def switch_network(self):
""" Pushes a network change to the user if it's needed """
if self.next_network != self.current_network:
            job = self.jobmanagers[self.next_network].latest_job
            if job is None:
                self.logger.error(
                    "Tried to switch network to {} that has no job!"
                    .format(self.next_network))
                return
            if self.current_network:
                self.logger.info(
                    "Switching from {} {:,.4f} -> {} {:,.4f} and pushing job NOW"
                    .format(self.current_network, self.profit_data[self.current_network],
                            self.next_network, self.profit_data[self.next_network]))
            self.current_network = self.next_network
            job.type = 0
            self.new_job.job = job
            self.new_job.set()
            self.new_job.clear()
            return True
return False
def update_profitability(self, currency):
""" Recalculates the profitability for a specific currency """
jobmanager = self.jobmanagers[currency]
last_job = jobmanager.latest_job
pscore, ratio, _ = self.price_data[currency]
# We can't update if we don't have a job and profit data
if last_job is None or pscore is None:
return False
max_blockheight = jobmanager.config['max_blockheight']
if max_blockheight is not None and last_job.block_height >= max_blockheight:
self.profit_data[currency] = 0
self.logger.debug(
"{} height {} is >= the configured maximum blockheight of {}, "
"setting profitability to 0."
.format(currency, last_job.block_height, max_blockheight))
return True
block_value = last_job.total_value / 100000000.0
diff = bits_to_difficulty(hexlify(last_job.bits))
self.profit_data[currency] = (block_value * float(pscore) / diff) * ratio * 1000000
self.logger.debug(
"Updating {} profit data;\n\tblock_value {};\n\tavg_price {:,.8f}"
";\n\tdiff {};\n\tratio {};\n\tresult {}"
.format(currency, block_value, float(pscore), diff,
ratio, self.profit_data[currency]))
self.manager.log_event("{name}.profitability.{curr}:{metric}|g"
.format(name=self.manager.config['procname'],
curr=currency,
metric=self.profit_data[currency]))
return True
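        # Sanity check of the formula above with illustrative numbers (not real
        # chain data): a 25-coin block, a price score of 0.002 and difficulty
        # 1,000,000 with ratio 1.0 give (25 * 0.002 / 1e6) * 1.0 * 1e6 = 0.05,
        # so the score grows with block value and price and shrinks with difficulty.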
def new_job_notif(self, event):
        if not hasattr(event, 'job'):
self.logger.info("No blocks mined yet, skipping switch logic")
return
currency = event.job.currency
flush = event.job.type == 0
if currency == self.current_network:
self.logger.info("Recieved new job on most profitable network {}"
.format(currency))
# See if we need to switch now that we're done with that block. If
# not, push a new job on this network
if not self.switch_network():
self.new_job.job = event.job
self.new_job.set()
self.new_job.clear()
        # If we're receiving a new block then diff has changed, so update the
# network profit and recompute best network
if flush and self.update_profitability(currency):
self.logger.debug("Re-checking best network after new job from {}"
.format(currency))
self.check_best()
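        # Note: event.job.type == 0 is treated as a "flush" job (a new block on
        # that network), matching the job.type = 0 set in switch_network above,
        # so profitability is only recomputed on block changes rather than on
        # every clean-job push.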
def start(self):
Jobmanager.start(self)
self.config['jobmanagers'] = set(self.config['jobmanagers'])
found_managers = set()
for manager in self.manager.component_types['Jobmanager']:
if manager.key in self.config['jobmanagers']:
currency = manager.config['currency']
self.jobmanagers[currency] = manager
self.profit_data[currency] = 0
self.price_data[currency] = (None, None, None)
found_managers.add(manager.key)
manager.new_job.rawlink(self.new_job_notif)
for monitor in self.config['jobmanagers'] - found_managers:
self.logger.error("Unable to locate Jobmanager(s) '{}'".format(monitor))
switching_jobmanager.py | import redis
import simplejson
import time
import operator
from cryptokit import bits_to_difficulty
from gevent.event import Event
from powerpool.lib import loop
from powerpool.jobmanagers import Jobmanager
from binascii import hexlify
class MonitorNetworkMulti(Jobmanager):
def __init__(self, config):
self._configure(config)
super(Jobmanager, self).__init__()
# Since some MonitorNetwork objs are polling and some aren't....
self.gl_methods = ['update_profit']
# Child jobmanagers
self.jobmanagers = {}
self.price_data = {}
self.profit_data = {}
self.next_network = None
self.current_network = None
# Currently active jobs keyed by their unique ID
self.jobs = {}
self.new_job = Event()
self.redis = redis.Redis(**self.config['redis'])
@property
def latest_job(self):
""" Proxy the jobmanager we're currently mining ons job """
return self.jobmanagers[self.current_network].latest_job
@property
def status(self):
""" For display in the http monitor """
return dict(price_data=self.price_data,
profit_data=self.profit_data,
next_network=self.next_network,
current_network=self.current_network)
@loop(interval='profit_poll_int')
def update_profit(self):
""" Continually check redis for new profit information """
# Accessing Redis can cause greenlet switches because of new jobs. We don't
# want to potentially switch jobs multiple times quickly, so we update
# the profitability information all at once after the loop to avoid
# multiple network switches
new_price_data = {}
for manager in self.jobmanagers.itervalues():
currency = manager.config['currency']
pscore = self.redis.get("{}_profit".format(currency))
# Deserialize
if pscore:
try:
pscore = simplejson.loads(pscore, use_decimal=True)
except Exception:
self.logger.warn(
"Error parsing profit score for {}! Setting it to 0.."
.format(currency))
pscore = 0
pass
# If no score was grabbed, pass a 0 value score
else:
self.logger.warn("Unable to grab profit info for {}!"
.format(currency))
pscore = 0
ratio = self.redis.get("{}_ratio".format(currency)) or 1.0
ratio = float(ratio)
# Only set updated if it actually changed
if self.price_data[currency][0] != pscore or self.price_data[currency][1] != ratio:
new_price_data[currency] = (pscore, ratio, time.time())
# If we have some new information, adjust accordingly
if new_price_data:
self.logger.info("Updated price information for {}"
.format(new_price_data.keys()))
# Atomic update in gevent
self.price_data.update(new_price_data)
# Update all the profit info. No preemption, just maths
for currency in self.jobmanagers.iterkeys():
self.update_profitability(currency)
self.logger.debug(
"Re-checking best network after new price data for {}"
.format(new_price_data.keys()))
self.check_best()
def check_best(self):
""" Assuming that `profit_data` is completely up to date, evaluate the
most profitable network and switch immediately if there's a big enough
difference. Otherwise set it to be changed at next block notification.
"""
# Get the most profitable network based on our current data
new_best = max(self.profit_data.iteritems(),
key=operator.itemgetter(1))[0]
if self.current_network is None:
self.logger.info(
"No active network, so switching to {} with profit of {:,.4f}"
.format(new_best, self.profit_data[new_best]))
self.next_network = new_best
self.switch_network()
return
# If the most profitable network beats the network we're currently mining on
# by the configured margin_switch factor, switch immediately
margin_switch = self.config['margin_switch']
if (margin_switch and
self.profit_data[self.next_network] >
(self.profit_data[self.current_network] * margin_switch)):
self.logger.info(
"Network {} {:,.4f} now more profitable than current network "
"{} {:,.4f} by a fair margin. Switching NOW."
.format(new_best, self.profit_data[new_best], self.current_network,
self.profit_data[self.current_network]))
self.next_network = new_best
self.switch_network()
return
if new_best != self.next_network:
self.logger.info(
"Network {} {:,.4f} now more profitable than current best "
"{} {:,.4f}. Switching on next block from current network {}."
.format(new_best, self.profit_data[new_best], self.next_network,
self.profit_data[self.next_network], self.current_network))
self.next_network = new_best
return
self.logger.debug("Network {} {:,.4f} still most profitable"
.format(new_best, self.profit_data[new_best]))
def switch_network(self):
""" Pushes a network change to the user if it's needed """
if self.next_network != self.current_network:
job = self.jobmanagers[self.next_network].latest_job
if job is None:
self.logger.error(
"Tried to switch network to {} that has no job!"
.format(self.next_network))
return
if self.current_network:
self.logger.info(
"Switching from {} {:,.4f} -> {} {:,.4f} and pushing job NOW"
.format(self.current_network, self.profit_data[self.current_network],
self.next_network, self.profit_data[self.next_network]))
self.current_network = self.next_network
job.type = 0
self.new_job.job = job
self.new_job.set()
self.new_job.clear()
return True
return False
def update_profitability(self, currency):
""" Recalculates the profitability for a specific currency """
jobmanager = self.jobmanagers[currency]
last_job = jobmanager.latest_job
pscore, ratio, _ = self.price_data[currency]
# We can't update if we don't have a job and profit data
if last_job is None or pscore is None:
return False
max_blockheight = jobmanager.config['max_blockheight']
if max_blockheight is not None and last_job.block_height >= max_blockheight:
self.profit_data[currency] = 0
self.logger.debug(
"{} height {} is >= the configured maximum blockheight of {}, "
"setting profitability to 0."
.format(currency, last_job.block_height, max_blockheight))
return True
block_value = last_job.total_value / 100000000.0
diff = bits_to_difficulty(hexlify(last_job.bits))
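# Profitability estimate: block reward (in coins) times the price score per unit
# of difficulty, weighted by the exchange ratio and scaled by 1e6 for the gauge metric.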
self.profit_data[currency] = (block_value * float(pscore) / diff) * ratio * 1000000
self.logger.debug(
"Updating {} profit data;\n\tblock_value {};\n\tavg_price {:,.8f}"
";\n\tdiff {};\n\tratio {};\n\tresult {}"
.format(currency, block_value, float(pscore), diff,
ratio, self.profit_data[currency]))
self.manager.log_event("{name}.profitability.{curr}:{metric}|g"
.format(name=self.manager.config['procname'],
curr=currency,
metric=self.profit_data[currency]))
return True
def new_job_notif(self, event):
if not hasattr(event, 'job'):
self.logger.info("No blocks mined yet, skipping switch logic")
return
currency = event.job.currency
flush = event.job.type == 0
if currency == self.current_network:
self.logger.info("Recieved new job on most profitable network {}"
.format(currency))
# See if we need to switch now that we're done with that block. If
# not, push a new job on this network
if not self.switch_network():
self.new_job.job = event.job
self.new_job.set()
self.new_job.clear()
# If we're receiving a new block then diff has changed, so update the
# network profit and recompute best network
if flush and self.update_profitability(currency):
self.logger.debug("Re-checking best network after new job from {}"
.format(currency))
self.check_best()
def | (self):
Jobmanager.start(self)
self.config['jobmanagers'] = set(self.config['jobmanagers'])
found_managers = set()
for manager in self.manager.component_types['Jobmanager']:
if manager.key in self.config['jobmanagers']:
currency = manager.config['currency']
self.jobmanagers[currency] = manager
self.profit_data[currency] = 0
self.price_data[currency] = (None, None, None)
found_managers.add(manager.key)
manager.new_job.rawlink(self.new_job_notif)
for monitor in self.config['jobmanagers'] - found_managers:
self.logger.error("Unable to locate Jobmanager(s) '{}'".format(monitor))
| start | identifier_name |
switching_jobmanager.py | import redis
import simplejson
import time
import operator
from cryptokit import bits_to_difficulty
from gevent.event import Event
from powerpool.lib import loop
from powerpool.jobmanagers import Jobmanager
from binascii import hexlify
class MonitorNetworkMulti(Jobmanager):
def __init__(self, config):
self._configure(config)
super(Jobmanager, self).__init__()
# Since some MonitorNetwork objs are polling and some aren't....
self.gl_methods = ['update_profit']
# Child jobmanagers
self.jobmanagers = {}
self.price_data = {}
self.profit_data = {}
self.next_network = None
self.current_network = None
# Currently active jobs keyed by their unique ID
self.jobs = {}
self.new_job = Event()
self.redis = redis.Redis(**self.config['redis'])
@property
def latest_job(self):
""" Proxy the jobmanager we're currently mining ons job """
return self.jobmanagers[self.current_network].latest_job
@property
def status(self):
""" For display in the http monitor """
return dict(price_data=self.price_data,
profit_data=self.profit_data,
next_network=self.next_network,
current_network=self.current_network)
@loop(interval='profit_poll_int')
def update_profit(self):
""" Continually check redis for new profit information """
# Accessing Redis can cause greenlet switches because of new jobs. We don't
# want to potentially switch jobs multiple times quickly, so we update
# the profitability information all at once after the loop to avoid
# multiple network switches
new_price_data = {}
for manager in self.jobmanagers.itervalues():
currency = manager.config['currency']
pscore = self.redis.get("{}_profit".format(currency))
# Deserialize
if pscore:
try:
pscore = simplejson.loads(pscore, use_decimal=True)
except Exception:
self.logger.warn(
"Error parsing profit score for {}! Setting it to 0.."
.format(currency))
pscore = 0
pass
# If no score was grabbed, pass a 0 value score
else:
self.logger.warn("Unable to grab profit info for {}!"
.format(currency))
pscore = 0
ratio = self.redis.get("{}_ratio".format(currency)) or 1.0
ratio = float(ratio)
# Only set updated if it actually changed
if self.price_data[currency][0] != pscore or self.price_data[currency][1] != ratio:
new_price_data[currency] = (pscore, ratio, time.time())
# If we have some new information, adjust accordingly
if new_price_data:
self.logger.info("Updated price information for {}"
.format(new_price_data.keys()))
# Atomic update in gevent
self.price_data.update(new_price_data)
# Update all the profit info. No preemption, just maths
for currency in self.jobmanagers.iterkeys():
self.update_profitability(currency)
self.logger.debug(
"Re-checking best network after new price data for {}"
.format(new_price_data.keys()))
self.check_best()
def check_best(self):
""" Assuming that `profit_data` is completely up to date, evaluate the
most profitable network and switch immediately if there's a big enough
difference. Otherwise set it to be changed at next block notification.
"""
# Get the most profitable network based on our current data
new_best = max(self.profit_data.iteritems(),
key=operator.itemgetter(1))[0]
if self.current_network is None:
self.logger.info(
"No active network, so switching to {} with profit of {:,.4f}"
.format(new_best, self.profit_data[new_best]))
self.next_network = new_best
self.switch_network()
return
# If the most profitable network beats the network we're currently mining on
# by the configured margin_switch factor, switch immediately
margin_switch = self.config['margin_switch']
if (margin_switch and
self.profit_data[self.next_network] >
(self.profit_data[self.current_network] * margin_switch)):
self.logger.info(
"Network {} {:,.4f} now more profitable than current network "
"{} {:,.4f} by a fair margin. Switching NOW."
.format(new_best, self.profit_data[new_best], self.current_network,
self.profit_data[self.current_network]))
self.next_network = new_best
self.switch_network()
return
if new_best != self.next_network:
self.logger.info(
"Network {} {:,.4f} now more profitable than current best "
"{} {:,.4f}. Switching on next block from current network {}."
.format(new_best, self.profit_data[new_best], self.next_network,
self.profit_data[self.next_network], self.current_network))
self.next_network = new_best
return
self.logger.debug("Network {} {:,.4f} still most profitable"
.format(new_best, self.profit_data[new_best]))
def switch_network(self):
""" Pushes a network change to the user if it's needed """
if self.next_network != self.current_network:
job = self.jobmanagers[self.next_network].latest_job
if job is None:
self.logger.error(
"Tried to switch network to {} that has no job!"
.format(self.next_network))
return
if self.current_network:
self.logger.info(
"Switching from {} {:,.4f} -> {} {:,.4f} and pushing job NOW"
.format(self.current_network, self.profit_data[self.current_network],
self.next_network, self.profit_data[self.next_network]))
self.current_network = self.next_network
job.type = 0
self.new_job.job = job
self.new_job.set()
self.new_job.clear()
return True
return False
def update_profitability(self, currency):
""" Recalculates the profitability for a specific currency """
jobmanager = self.jobmanagers[currency]
last_job = jobmanager.latest_job
pscore, ratio, _ = self.price_data[currency]
# We can't update if we don't have a job and profit data
if last_job is None or pscore is None:
return False
max_blockheight = jobmanager.config['max_blockheight']
if max_blockheight is not None and last_job.block_height >= max_blockheight: | "{} height {} is >= the configured maximum blockheight of {}, "
"setting profitability to 0."
.format(currency, last_job.block_height, max_blockheight))
return True
block_value = last_job.total_value / 100000000.0
diff = bits_to_difficulty(hexlify(last_job.bits))
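# Profitability estimate: block reward (in coins) times the price score per unit
# of difficulty, weighted by the exchange ratio and scaled by 1e6 for the gauge metric.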
self.profit_data[currency] = (block_value * float(pscore) / diff) * ratio * 1000000
self.logger.debug(
"Updating {} profit data;\n\tblock_value {};\n\tavg_price {:,.8f}"
";\n\tdiff {};\n\tratio {};\n\tresult {}"
.format(currency, block_value, float(pscore), diff,
ratio, self.profit_data[currency]))
self.manager.log_event("{name}.profitability.{curr}:{metric}|g"
.format(name=self.manager.config['procname'],
curr=currency,
metric=self.profit_data[currency]))
return True
def new_job_notif(self, event):
if not hasattr(event, 'job'):
self.logger.info("No blocks mined yet, skipping switch logic")
return
currency = event.job.currency
flush = event.job.type == 0
if currency == self.current_network:
self.logger.info("Recieved new job on most profitable network {}"
.format(currency))
# See if we need to switch now that we're done with that block. If
# not, push a new job on this network
if not self.switch_network():
self.new_job.job = event.job
self.new_job.set()
self.new_job.clear()
# If we're receiving a new block then diff has changed, so update the
# network profit and recompute best network
if flush and self.update_profitability(currency):
self.logger.debug("Re-checking best network after new job from {}"
.format(currency))
self.check_best()
def start(self):
Jobmanager.start(self)
self.config['jobmanagers'] = set(self.config['jobmanagers'])
found_managers = set()
for manager in self.manager.component_types['Jobmanager']:
if manager.key in self.config['jobmanagers']:
currency = manager.config['currency']
self.jobmanagers[currency] = manager
self.profit_data[currency] = 0
self.price_data[currency] = (None, None, None)
found_managers.add(manager.key)
manager.new_job.rawlink(self.new_job_notif)
for monitor in self.config['jobmanagers'] - found_managers:
self.logger.error("Unable to locate Jobmanager(s) '{}'".format(monitor)) | self.profit_data[currency] = 0
self.logger.debug( | random_line_split |
switching_jobmanager.py | import redis
import simplejson
import time
import operator
from cryptokit import bits_to_difficulty
from gevent.event import Event
from powerpool.lib import loop
from powerpool.jobmanagers import Jobmanager
from binascii import hexlify
class MonitorNetworkMulti(Jobmanager):
def __init__(self, config):
self._configure(config)
super(Jobmanager, self).__init__()
# Since some MonitorNetwork objs are polling and some aren't....
self.gl_methods = ['update_profit']
# Child jobmanagers
self.jobmanagers = {}
self.price_data = {}
self.profit_data = {}
self.next_network = None
self.current_network = None
# Currently active jobs keyed by their unique ID
self.jobs = {}
self.new_job = Event()
self.redis = redis.Redis(**self.config['redis'])
@property
def latest_job(self):
""" Proxy the jobmanager we're currently mining ons job """
return self.jobmanagers[self.current_network].latest_job
@property
def status(self):
""" For display in the http monitor """
return dict(price_data=self.price_data,
profit_data=self.profit_data,
next_network=self.next_network,
current_network=self.current_network)
@loop(interval='profit_poll_int')
def update_profit(self):
""" Continually check redis for new profit information """
# Accessing Redis can cause greenlet switches because of new jobs. We don't
# want to potentially switch jobs multiple times quickly, so we update
# the profitability information all at once after the loop to avoid
# multiple network switches
new_price_data = {}
for manager in self.jobmanagers.itervalues():
currency = manager.config['currency']
pscore = self.redis.get("{}_profit".format(currency))
# Deserialize
if pscore:
try:
pscore = simplejson.loads(pscore, use_decimal=True)
except Exception:
self.logger.warn(
"Error parsing profit score for {}! Setting it to 0.."
.format(currency))
pscore = 0
pass
# If no score was grabbed, pass a 0 value score
else:
self.logger.warn("Unable to grab profit info for {}!"
.format(currency))
pscore = 0
ratio = self.redis.get("{}_ratio".format(currency)) or 1.0
ratio = float(ratio)
# Only set updated if it actually changed
if self.price_data[currency][0] != pscore or self.price_data[currency][1] != ratio:
new_price_data[currency] = (pscore, ratio, time.time())
# If we have some new information, adjust accordingly
if new_price_data:
self.logger.info("Updated price information for {}"
.format(new_price_data.keys()))
# Atomic update in gevent
self.price_data.update(new_price_data)
# Update all the profit info. No preemption, just maths
for currency in self.jobmanagers.iterkeys():
self.update_profitability(currency)
self.logger.debug(
"Re-checking best network after new price data for {}"
.format(new_price_data.keys()))
self.check_best()
def check_best(self):
""" Assuming that `profit_data` is completely up to date, evaluate the
most profitable network and switch immediately if there's a big enough
difference. Otherwise set it to be changed at next block notification.
"""
# Get the most profitable network based on our current data
new_best = max(self.profit_data.iteritems(),
key=operator.itemgetter(1))[0]
if self.current_network is None:
self.logger.info(
"No active network, so switching to {} with profit of {:,.4f}"
.format(new_best, self.profit_data[new_best]))
self.next_network = new_best
self.switch_network()
return
# If the most profitable network beats the network we're currently mining on
# by the configured margin_switch factor, switch immediately
margin_switch = self.config['margin_switch']
if (margin_switch and
self.profit_data[self.next_network] >
(self.profit_data[self.current_network] * margin_switch)):
self.logger.info(
"Network {} {:,.4f} now more profitable than current network "
"{} {:,.4f} by a fair margin. Switching NOW."
.format(new_best, self.profit_data[new_best], self.current_network,
self.profit_data[self.current_network]))
self.next_network = new_best
self.switch_network()
return
if new_best != self.next_network:
self.logger.info(
"Network {} {:,.4f} now more profitable than current best "
"{} {:,.4f}. Switching on next block from current network {}."
.format(new_best, self.profit_data[new_best], self.next_network,
self.profit_data[self.next_network], self.current_network))
self.next_network = new_best
return
self.logger.debug("Network {} {:,.4f} still most profitable"
.format(new_best, self.profit_data[new_best]))
def switch_network(self):
""" Pushes a network change to the user if it's needed """
if self.next_network != self.current_network:
job = self.jobmanagers[self.next_network].latest_job
if job is None:
self.logger.error(
"Tried to switch network to {} that has no job!"
.format(self.next_network))
return
if self.current_network:
self.logger.info(
"Switching from {} {:,.4f} -> {} {:,.4f} and pushing job NOW"
.format(self.current_network, self.profit_data[self.current_network],
self.next_network, self.profit_data[self.next_network]))
self.current_network = self.next_network
job.type = 0
self.new_job.job = job
self.new_job.set()
self.new_job.clear()
return True
return False
def update_profitability(self, currency):
""" Recalculates the profitability for a specific currency """
jobmanager = self.jobmanagers[currency]
last_job = jobmanager.latest_job
pscore, ratio, _ = self.price_data[currency]
# We can't update if we don't have a job and profit data
if last_job is None or pscore is None:
return False
max_blockheight = jobmanager.config['max_blockheight']
if max_blockheight is not None and last_job.block_height >= max_blockheight:
self.profit_data[currency] = 0
self.logger.debug(
"{} height {} is >= the configured maximum blockheight of {}, "
"setting profitability to 0."
.format(currency, last_job.block_height, max_blockheight))
return True
block_value = last_job.total_value / 100000000.0
diff = bits_to_difficulty(hexlify(last_job.bits))
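# Profitability estimate: block reward (in coins) times the price score per unit
# of difficulty, weighted by the exchange ratio and scaled by 1e6 for the gauge metric.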
self.profit_data[currency] = (block_value * float(pscore) / diff) * ratio * 1000000
self.logger.debug(
"Updating {} profit data;\n\tblock_value {};\n\tavg_price {:,.8f}"
";\n\tdiff {};\n\tratio {};\n\tresult {}"
.format(currency, block_value, float(pscore), diff,
ratio, self.profit_data[currency]))
self.manager.log_event("{name}.profitability.{curr}:{metric}|g"
.format(name=self.manager.config['procname'],
curr=currency,
metric=self.profit_data[currency]))
return True
def new_job_notif(self, event):
| .format(currency))
self.check_best()
def start(self):
Jobmanager.start(self)
self.config['jobmanagers'] = set(self.config['jobmanagers'])
found_managers = set()
for manager in self.manager.component_types['Jobmanager']:
if manager.key in self.config['jobmanagers']:
currency = manager.config['currency']
self.jobmanagers[currency] = manager
self.profit_data[currency] = 0
self.price_data[currency] = (None, None, None)
found_managers.add(manager.key)
manager.new_job.rawlink(self.new_job_notif)
for monitor in self.config['jobmanagers'] - found_managers:
self.logger.error("Unable to locate Jobmanager(s) '{}'".format(monitor))
| if not hasattr(event, 'job'):
self.logger.info("No blocks mined yet, skipping switch logic")
return
currency = event.job.currency
flush = event.job.type == 0
if currency == self.current_network:
self.logger.info("Recieved new job on most profitable network {}"
.format(currency))
# See if we need to switch now that we're done with that block. If
# not, push a new job on this network
if not self.switch_network():
self.new_job.job = event.job
self.new_job.set()
self.new_job.clear()
# If we're receiving a new block then diff has changed, so update the
# network profit and recompute best network
if flush and self.update_profitability(currency):
self.logger.debug("Re-checking best network after new job from {}" | identifier_body |
kite.go | }
//Reconnect manager
reconnManager := turbo.NewReconnectManager(true, 30*time.Second,
100, handshake)
k.clientManager = turbo.NewClientManager(reconnManager)
//Build the pipeline structure
pipeline := turbo.NewDefaultPipeline()
ackHandler := NewAckHandler("ack", k.clientManager)
accept := NewAcceptHandler("accept", k.listener)
remoting := turbo.NewRemotingHandler("remoting", k.clientManager)
//Use separate pools with different priorities for ack and accept events
msgPool := turbo.NewLimitPool(k.ctx, 50)
ackPool := turbo.NewLimitPool(k.ctx, 5)
storeAckPool := turbo.NewLimitPool(k.ctx, 5)
defaultPool := turbo.NewLimitPool(k.ctx, 5)
//pools
pools := make(map[uint8]*turbo.GPool)
pools[protocol.CMD_CONN_AUTH] = ackPool
pools[protocol.CMD_HEARTBEAT] = ackPool
pools[protocol.CMD_MESSAGE_STORE_ACK] = storeAckPool
pools[protocol.CMD_TX_ACK] = msgPool
pools[protocol.CMD_BYTES_MESSAGE] = msgPool
pools[protocol.CMD_STRING_MESSAGE] = msgPool
k.pools = pools
k.defaultPool = defaultPool
unmarshal := NewUnmarshalHandler("unmarshal",
pools,
defaultPool)
pipeline.RegisteHandler("unmarshal", unmarshal)
pipeline.RegisteHandler("ack", ackHandler)
pipeline.RegisteHandler("accept", accept)
pipeline.RegisteHandler("remoting", remoting)
k.pipeline = pipeline
//Register to watch kiteq server changes
k.registryCenter.RegisterWatcher(k)
hostname, _ := os.Hostname()
//Publish this host's topics to the registry
err := k.registryCenter.PublishTopics(k.topics, k.ga.GroupId, hostname)
if nil != err {
log.Errorf("kite|PublishTopics|FAIL|%s|%s", err, k.topics)
} else {
log.Infof("kite|PublishTopics|SUCC|%s", k.topics)
}
outter:
for _, b := range k.binds {
for _, t := range k.topics {
if t == b.Topic {
continue outter
}
}
k.topics = append(k.topics, b.Topic)
}
for _, topic := range k.topics {
hosts, err := k.registryCenter.GetQServerAndWatch(topic)
if nil != err {
log.Errorf("kite|GetQServerAndWatch|FAIL|%s|%s", err, topic)
} else {
log.Infof("kite|GetQServerAndWatch|SUCC|%s|%s", topic, hosts)
}
k.OnQServerChanged(topic, hosts)
}
length := 0
k.topicToAddress.Range(func(key, value interface{}) bool {
length++
return true
})
if length <= 0 {
log.Errorf("kite|Start|NO VALID KITESERVER|%s", k.topics)
}
if !k.isPreEnv && len(k.binds) > 0 {
//Push the subscription bindings and pull the QServer list
err = k.registryCenter.PublishBindings(k.ga.GroupId, k.binds)
if nil != err {
log.Errorf("kite|PublishBindings|FAIL|%s|%v", err, k.binds)
}
}
if k.isPreEnv {
rawBinds, _ := json.Marshal(k.binds)
log.Infof("kite|PublishBindings|Ignored|[preEnv:%v]|%s...", k.isPreEnv, string(rawBinds))
}
//Start traffic statistics
k.remointflow()
go k.heartbeat()
go k.poolMonitor()
}
//poolMonitor
func (k *kite) poolMonitor() {
for {
select {
case <-k.ctx.Done():
break
default:
}
keys := make([]int, 0, len(k.pools))
for cmdType := range k.pools {
keys = append(keys, int(cmdType))
}
sort.Ints(keys)
str := fmt.Sprintf("Cmd-Pool\tGoroutines:%d\t", runtime.NumGoroutine())
for _, cmdType := range keys {
p := k.pools[uint8(cmdType)]
used, capsize := p.Monitor()
str += fmt.Sprintf("%s:%d/%d\t", protocol.NameOfCmd(uint8(cmdType)), used, capsize)
}
used, capsize := k.defaultPool.Monitor()
str += fmt.Sprintf("default:%d/%d\t", used, capsize)
log.Infof(str)
time.Sleep(1 * time.Second)
}
}
//Handler for the kiteq client
func (k *kite) fire(ctx *turbo.TContext) error {
p := ctx.Message
c := ctx.Client
event := turbo.NewPacketEvent(c, p)
err := k.pipeline.FireWork(event)
if nil != err {
log.Errorf("kite|onPacketReceive|FAIL|%s|%v", err, p)
return err
}
return nil
}
//Create the physical connection
func dial(hostport string) (*net.TCPConn, error) {
//Connect
remoteAddr, err_r := net.ResolveTCPAddr("tcp4", hostport)
if nil != err_r {
log.Errorf("kite|RECONNECT|RESOLVE ADDR |FAIL|remote:%s", err_r)
return nil, err_r
}
conn, err := net.DialTCP("tcp4", nil, remoteAddr)
if nil != err {
log.Errorf("kite|RECONNECT|%s|FAIL|%s", hostport, err)
return nil, err
}
return conn, nil
}
//Handshake packet
func handshake(ga *turbo.GroupAuth, remoteClient *turbo.TClient) (bool, error) {
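// Send CMD_CONN_META with the group credentials and wait for the auth ack;
// retry up to three times, sleeping two seconds between failed attempts.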
for i := 0; i < 3; i++ {
p := protocol.MarshalConnMeta(ga.GroupId, ga.SecretKey, int32(ga.WarmingupSec))
rpacket := turbo.NewPacket(protocol.CMD_CONN_META, p)
resp, err := remoteClient.WriteAndGet(*rpacket, 5*time.Second)
if nil != err {
//Retry after two seconds
time.Sleep(2 * time.Second)
log.Warnf("kiteIO|handShake|FAIL|%s|%s", ga.GroupId, err)
} else {
authAck, ok := resp.(*protocol.ConnAuthAck)
if !ok {
return false, errors.New("Unmatches Handshake Ack Type! ")
} else {
if authAck.GetStatus() {
log.Infof("kiteIO|handShake|SUCC|%s|%s", ga.GroupId, authAck.GetFeedback())
return true, nil
} else {
log.Warnf("kiteIO|handShake|FAIL|%s|%s", ga.GroupId, authAck.GetFeedback())
return false, errors.New("Auth FAIL![" + authAck.GetFeedback() + "]")
}
}
}
}
return false, errors.New("handshake fail! [" + remoteClient.RemoteAddr() + "]")
}
func (k *kite) SetPublishTopics(topics []string) {
k.topics = append(k.topics, topics...)
}
func (k *kite) SetBindings(bindings []*registry.Binding) {
for _, b := range bindings {
b.GroupId = k.ga.GroupId
if nil != b.Handler {
k.listener.RegisteHandler(b)
}
}
k.binds = bindings
}
//Send a transactional message
func (k *kite) SendTxMessage(msg *protocol.QMessage, doTransaction DoTransaction) (err error) {
msg.GetHeader().GroupId = protocol.MarshalPbString(k.ga.GroupId)
//Route selection strategy
c, err := k.selectKiteClient(msg.GetHeader())
if nil != err {
return err
}
//Send the message first
err = sendMessage(c, msg)
if nil != err {
return err
}
//Run the local transaction: commit if succ is true, roll back on any error or failure
feedback := ""
succ := false
txstatus := protocol.TX_UNKNOWN
//Execute the local transaction
succ, err = doTransaction(msg)
if nil == err && succ {
txstatus = protocol.TX_COMMIT
} else {
txstatus = protocol.TX_ROLLBACK
if nil != err {
feedback = err.Error()
}
}
//Send the tx ack to the server
sendTxAck(c, msg, txstatus, feedback)
return err
}
//Send a message
func (k *kite) SendMessage(msg *protocol.QMessage) error {
//fix header groupId
msg.GetHeader().GroupId = protocol.MarshalPbString(k.ga.GroupId)
//select client
c, err := k.selectKiteClient(msg.GetHeader())
if nil != err {
return err
}
return sendMessage(c, msg)
}
//kiteclient route selection strategy
func (k *kite) selectKiteClient(header *protocol.Header) (*turbo.TClient, error) {
return k.cliSelector.Select(header, k.topicToAddress, k.addressToTClient, func(tc *turbo.TClient) bool | {
//只接收
| identifier_name |
|
kite.go | 24, 10000, 10000,
10*time.Second,
50*10000)
ctx, closed := context.WithCancel(parent)
registryCenter := registry.NewRegistryCenter(ctx, registryUri)
ga := turbo.NewGroupAuth(groupId, secretKey)
ga.WarmingupSec = warmingupSec
manager := &kite{
ga: ga,
topicToAddress: &sync.Map{},
addressToTClient: &sync.Map{},
topics: make([]string, 0, 10),
config: config,
flowstat: flowstat,
registryUri: registryUri,
registryCenter: registryCenter,
ctx: ctx,
closed: closed,
listener: listener,
heartbeatPeriod: 10 * time.Second,
heartbeatTimeout: 5 * time.Second,
cliSelector: NewRandomSelector(),
}
registryCenter.RegisterWatcher(manager)
return manager
}
func (k *kite) remointflow() {
go func() {
t := time.NewTicker(1 * time.Second)
for {
ns := k.config.FlowStat.Stat()
log.Infof("Remoting read:%d/%d\twrite:%d/%d\tdispatcher_go:%d/%d\tconnetions:%d", ns.ReadBytes, ns.ReadCount,
ns.WriteBytes, ns.WriteCount, ns.DisPoolSize, ns.DisPoolCap, k.clientManager.ConnNum())
<-t.C
}
}()
}
//Setting the listener here is deprecated
//A default Listener is created automatically; just set the handler on each Binding when subscribing
func (k *kite) SetListener(listener IListener) {
k.listener = listener
}
func (k *kite) GetListener() IListener {
return k.listener
}
//Start
func (k *kite) Start() {
//Panic on start if no listener is set
if nil == k.listener {
panic("KiteClient Listener Not Set !")
}
//If this is the pre-release environment, append the pre-release suffix to the group id
if k.isPreEnv {
k.ga.GroupId = fmt.Sprintf("%s-pre", k.ga.GroupId)
}
//Reconnect manager
reconnManager := turbo.NewReconnectManager(true, 30*time.Second,
100, handshake)
k.clientManager = turbo.NewClientManager(reconnManager)
//Build the pipeline structure
pipeline := turbo.NewDefaultPipeline()
ackHandler := NewAckHandler("ack", k.clientManager)
accept := NewAcceptHandler("accept", k.listener)
remoting := turbo.NewRemotingHandler("remoting", k.clientManager)
//Use separate pools with different priorities for ack and accept events
msgPool := turbo.NewLimitPool(k.ctx, 50)
ackPool := turbo.NewLimitPool(k.ctx, 5)
storeAckPool := turbo.NewLimitPool(k.ctx, 5)
defaultPool := turbo.NewLimitPool(k.ctx, 5)
//pools
pools := make(map[uint8]*turbo.GPool)
pools[protocol.CMD_CONN_AUTH] = ackPool
pools[protocol.CMD_HEARTBEAT] = ackPool
pools[protocol.CMD_MESSAGE_STORE_ACK] = storeAckPool
pools[protocol.CMD_TX_ACK] = msgPool
pools[protocol.CMD_BYTES_MESSAGE] = msgPool
pools[protocol.CMD_STRING_MESSAGE] = msgPool
k.pools = pools
k.defaultPool = defaultPool
unmarshal := NewUnmarshalHandler("unmarshal",
pools,
defaultPool)
pipeline.RegisteHandler("unmarshal", unmarshal)
pipeline.RegisteHandler("ack", ackHandler)
pipeline.RegisteHandler("accept", accept)
pipeline.RegisteHandler("remoting", remoting)
k.pipeline = pipeline
//Register to watch kiteq server changes
k.registryCenter.RegisterWatcher(k)
hostname, _ := os.Hostname()
//Publish this host's topics to the registry
err := k.registryCenter.PublishTopics(k.topics, k.ga.GroupId, hostname)
if nil != err {
log.Errorf("kite|PublishTopics|FAIL|%s|%s", err, k.topics)
} else {
log.Infof("kite|PublishTopics|SUCC|%s", k.topics)
}
outter:
for _, b := range k.binds {
for _, t := range k.topics {
if t == b.Topic {
continue outter
}
}
k.topics = append(k.topics, b.Topic)
}
for _, topic := range k.topics {
hosts, err := k.registryCenter.GetQServerAndWatch(topic)
if nil != err {
log.Errorf("kite|GetQServerAndWatch|FAIL|%s|%s", err, topic)
} else {
log.Infof("kite|GetQServerAndWatch|SUCC|%s|%s", topic, hosts)
}
k.OnQServerChanged(topic, hosts)
}
length := 0
k.topicToAddress.Range(func(key, value interface{}) bool {
length++
return true
})
if length <= 0 {
log.Errorf("kite|Start|NO VALID KITESERVER|%s", k.topics)
}
if !k.isPreEnv && len(k.binds) > 0 {
//Push the subscription bindings and pull the QServer list
err = k.registryCenter.PublishBindings(k.ga.GroupId, k.binds)
if nil != err {
log.Errorf("kite|PublishBindings|FAIL|%s|%v", err, k.binds)
}
}
if k.isPreEnv {
rawBinds, _ := json.Marshal(k.binds)
log.Infof("kite|PublishBindings|Ignored|[preEnv:%v]|%s...", k.isPreEnv, string(rawBinds))
}
//Start traffic statistics
k.remointflow()
go k.heartbeat()
go k.poolMonitor()
}
//poolMonitor
func (k *kite) poolMonitor() {
for {
select {
case <-k.ctx.Done():
break
default:
}
keys := make([]int, 0, len(k.pools))
for cmdType := range k.pools {
keys = append(keys, int(cmdType))
}
sort.Ints(keys)
str := fmt.Sprintf("Cmd-Pool\tGoroutines:%d\t", runtime.NumGoroutine())
for _, cmdType := range keys {
p := k.pools[uint8(cmdType)]
used, capsize := p.Monitor()
str += fmt.Sprintf("%s:%d/%d\t", protocol.NameOfCmd(uint8(cmdType)), used, capsize)
}
used, capsize := k.defaultPool.Monitor()
str += fmt.Sprintf("default:%d/%d\t", used, capsize)
log.Infof(str)
time.Sleep(1 * time.Second)
}
}
//Handler for the kiteq client
func (k *kite) fire(ctx *turbo.TContext) error {
p := ctx.Message
c := ctx.Client
event := turbo.NewPacketEvent(c, p)
err := k.pipeline.FireWork(event)
if nil != err {
log.Errorf("kite|onPacketReceive|FAIL|%s|%v", err, p)
return err
}
return nil
}
//Create the physical connection
func dial(hostport string) (*net.TCPConn, error) {
//Connect
remoteAddr, err_r := net.ResolveTCPAddr("tcp4", hostport)
if nil != err_r {
log.Errorf("kite|RECONNECT|RESOLVE ADDR |FAIL|remote:%s", err_r)
return nil, err_r
}
conn, err := net.DialTCP("tcp4", nil, remoteAddr)
if nil != err {
log.Errorf("kite|RECONNECT|%s|FAIL|%s", hostport, err)
return nil, err | f nil != err {
//Retry after two seconds
time.Sleep(2 * time.Second)
log.Warnf("kiteIO|handShake|FAIL|%s|%s", ga.GroupId, err)
} else {
authAck, ok := resp.(*protocol.ConnAuthAck)
if !ok {
return false, errors.New("Unmatches Handshake Ack Type! ")
} else {
if authAck.GetStatus() {
log.Infof("kiteIO|handShake|SUCC|%s|%s", ga.GroupId, authAck.GetFeedback())
return true, nil
} else {
log.Warnf("kiteIO|handShake|FAIL|%s|%s", ga.GroupId, authAck.GetFeedback())
return false, errors.New("Auth FAIL![" + authAck.GetFeedback() + "]")
}
}
|
}
return conn, nil
}
//Handshake packet
func handshake(ga *turbo.GroupAuth, remoteClient *turbo.TClient) (bool, error) {
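// Send CMD_CONN_META with the group credentials and wait for the auth ack;
// retry up to three times, sleeping two seconds between failed attempts.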
for i := 0; i < 3; i++ {
p := protocol.MarshalConnMeta(ga.GroupId, ga.SecretKey, int32(ga.WarmingupSec))
rpacket := turbo.NewPacket(protocol.CMD_CONN_META, p)
resp, err := remoteClient.WriteAndGet(*rpacket, 5*time.Second)
i | identifier_body |
kite.go | closed context.CancelFunc
pools map[uint8]*turbo.GPool
defaultPool *turbo.GPool
//心跳时间
heartbeatPeriod time.Duration
heartbeatTimeout time.Duration
cliSelector Strategy
}
func newKite(parent context.Context, registryUri, groupId, secretKey string, warmingupSec int, listener IListener) *kite {
flowstat := stat.NewFlowStat()
config := turbo.NewTConfig(
"remoting-"+groupId,
50, 16*1024,
16*1024, 10000, 10000,
10*time.Second,
50*10000)
ctx, closed := context.WithCancel(parent)
registryCenter := registry.NewRegistryCenter(ctx, registryUri)
ga := turbo.NewGroupAuth(groupId, secretKey)
ga.WarmingupSec = warmingupSec
manager := &kite{
ga: ga,
topicToAddress: &sync.Map{},
addressToTClient: &sync.Map{},
topics: make([]string, 0, 10),
config: config,
flowstat: flowstat,
registryUri: registryUri,
registryCenter: registryCenter,
ctx: ctx,
closed: closed,
listener: listener,
heartbeatPeriod: 10 * time.Second,
heartbeatTimeout: 5 * time.Second,
cliSelector: NewRandomSelector(),
}
registryCenter.RegisterWatcher(manager)
return manager
}
func (k *kite) remointflow() {
go func() {
t := time.NewTicker(1 * time.Second)
for {
ns := k.config.FlowStat.Stat()
log.Infof("Remoting read:%d/%d\twrite:%d/%d\tdispatcher_go:%d/%d\tconnetions:%d", ns.ReadBytes, ns.ReadCount,
ns.WriteBytes, ns.WriteCount, ns.DisPoolSize, ns.DisPoolCap, k.clientManager.ConnNum())
<-t.C
}
}()
}
//Setting the listener here is deprecated
//A default Listener is created automatically; just set the handler on each Binding when subscribing
func (k *kite) SetListener(listener IListener) {
k.listener = listener
}
func (k *kite) GetListener() IListener {
return k.listener
}
//Start
func (k *kite) Start() {
//Panic on start if no listener is set
if nil == k.listener {
panic("KiteClient Listener Not Set !")
}
//If this is the pre-release environment, append the pre-release suffix to the group id
if k.isPreEnv {
k.ga.GroupId = fmt.Sprintf("%s-pre", k.ga.GroupId)
}
//Reconnect manager
reconnManager := turbo.NewReconnectManager(true, 30*time.Second,
100, handshake)
k.clientManager = turbo.NewClientManager(reconnManager)
//Build the pipeline structure
pipeline := turbo.NewDefaultPipeline()
ackHandler := NewAckHandler("ack", k.clientManager)
accept := NewAcceptHandler("accept", k.listener)
remoting := turbo.NewRemotingHandler("remoting", k.clientManager)
//Use separate pools with different priorities for ack and accept events
msgPool := turbo.NewLimitPool(k.ctx, 50)
ackPool := turbo.NewLimitPool(k.ctx, 5)
storeAckPool := turbo.NewLimitPool(k.ctx, 5)
defaultPool := turbo.NewLimitPool(k.ctx, 5)
//pools
pools := make(map[uint8]*turbo.GPool)
pools[protocol.CMD_CONN_AUTH] = ackPool
pools[protocol.CMD_HEARTBEAT] = ackPool
pools[protocol.CMD_MESSAGE_STORE_ACK] = storeAckPool
pools[protocol.CMD_TX_ACK] = msgPool
pools[protocol.CMD_BYTES_MESSAGE] = msgPool
pools[protocol.CMD_STRING_MESSAGE] = msgPool
k.pools = pools
k.defaultPool = defaultPool
unmarshal := NewUnmarshalHandler("unmarshal",
pools,
defaultPool)
pipeline.RegisteHandler("unmarshal", unmarshal)
pipeline.RegisteHandler("ack", ackHandler)
pipeline.RegisteHandler("accept", accept)
pipeline.RegisteHandler("remoting", remoting)
k.pipeline = pipeline
//Register to watch kiteq server changes
k.registryCenter.RegisterWatcher(k)
hostname, _ := os.Hostname()
//Publish this host's topics to the registry
err := k.registryCenter.PublishTopics(k.topics, k.ga.GroupId, hostname)
if nil != err {
log.Errorf("kite|PublishTopics|FAIL|%s|%s", err, k.topics)
} else {
log.Infof("kite|PublishTopics|SUCC|%s", k.topics)
}
outter:
for _, b := range k.binds {
for _, t := range k.topics {
if t == b.Topic {
continue outter
}
}
k.topics = append(k.topics, b.Topic)
}
for _, topic := range k.topics {
hosts, err := k.registryCenter.GetQServerAndWatch(topic)
if nil != err {
log.Errorf("kite|GetQServerAndWatch|FAIL|%s|%s", err, topic)
} else {
log.Infof("kite|GetQServerAndWatch|SUCC|%s|%s", topic, hosts)
}
k.OnQServerChanged(topic, hosts)
}
length := 0
k.topicToAddress.Range(func(key, value interface{}) bool {
length++
return true
})
if length <= 0 {
log.Errorf("kite|Start|NO VALID KITESERVER|%s", k.topics)
}
if !k.isPreEnv && len(k.binds) > 0 {
//Push the subscription bindings and pull the QServer list
err = k.registryCenter.PublishBindings(k.ga.GroupId, k.binds)
if nil != err {
log.Errorf("kite|PublishBindings|FAIL|%s|%v", err, k.binds)
}
}
if k.isPreEnv {
rawBinds, _ := json.Marshal(k.binds)
log.Infof("kite|PublishBindings|Ignored|[preEnv:%v]|%s...", k.isPreEnv, string(rawBinds))
}
//Start traffic statistics
k.remointflow()
go k.heartbeat()
go k.poolMonitor()
}
//poolMonitor
func (k *kite) poolMonitor() {
for {
select {
case <-k.ctx.Done():
break
default:
}
keys := make([]int, 0, len(k.pools))
for cmdType := range k.pools {
keys = append(keys, int(cmdType))
}
sort.Ints(keys)
str := fmt.Sprintf("Cmd-Pool\tGoroutines:%d\t", runtime.NumGoroutine())
for _, cmdType := range keys {
p := k.pools[uint8(cmdType)]
used, capsize := p.Monitor()
str += fmt.Sprintf("%s:%d/%d\t", protocol.NameOfCmd(uint8(cmdType)), used, capsize)
}
used, capsize := k.defaultPool.Monitor()
str += fmt.Sprintf("default:%d/%d\t", used, capsize)
log.Infof(str)
time.Sleep(1 * time.Second)
}
}
//Handler for the kiteq client
func (k *kite) fire(ctx *turbo.TContext) error {
p := ctx.Message
c := ctx.Client
event := turbo.NewPacketEvent(c, p)
err := k.pipeline.FireWork(event)
if nil != err {
log.Errorf("kite|onPacketReceive|FAIL|%s|%v", err, p)
return err
}
return nil
}
//Create the physical connection
func dial(hostport string) (*net.TCPConn, error) {
//Connect
remoteAddr, err_r := net.ResolveTCPAddr("tcp4", hostport)
if nil != err_r {
log.Errorf("kite|RECONNECT|RESOLVE ADDR |FAIL|remote:%s", err_r)
return nil, err_r
}
conn, err := net.DialTCP("tcp4", nil, remoteAddr)
if nil != err {
log.Errorf("kite|RECONNECT|%s|FAIL|%s", hostport, err)
return nil, err
}
return conn, nil
}
//Handshake packet
func handshake(ga *turbo.GroupAuth, remoteClient *turbo.TClient) (bool, error) {
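// Send CMD_CONN_META with the group credentials and wait for the auth ack;
// retry up to three times, sleeping two seconds between failed attempts.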
for i := 0; i < 3; i++ {
p := protocol.MarshalConnMeta(ga.GroupId, ga.SecretKey, int32(ga.WarmingupSec))
rpacket := turbo.NewPacket(protocol.CMD_CONN_META, p)
resp, err := remoteClient.WriteAndGet(*rpacket, 5*time.Second)
if nil != err {
//Retry after two seconds
time.Sleep(2 * time.Second)
log.Warnf("kiteIO|handShake|FAIL|%s|%s", ga.GroupId, err)
} else {
| flowstat *stat.FlowStat
ctx context.Context | random_line_split |
|
kite.go | 24, 10000, 10000,
10*time.Second,
50*10000)
ctx, closed := context.WithCancel(parent)
registryCenter := registry.NewRegistryCenter(ctx, registryUri)
ga := turbo.NewGroupAuth(groupId, secretKey)
ga.WarmingupSec = warmingupSec
manager := &kite{
ga: ga,
topicToAddress: &sync.Map{},
addressToTClient: &sync.Map{},
topics: make([]string, 0, 10),
config: config,
flowstat: flowstat,
registryUri: registryUri,
registryCenter: registryCenter,
ctx: ctx,
closed: closed,
listener: listener,
heartbeatPeriod: 10 * time.Second,
heartbeatTimeout: 5 * time.Second,
cliSelector: NewRandomSelector(),
}
registryCenter.RegisterWatcher(manager)
return manager
}
func (k *kite) remointflow() {
go func() {
t := time.NewTicker(1 * time.Second)
for {
ns := k.config.FlowStat.Stat()
log.Infof("Remoting read:%d/%d\twrite:%d/%d\tdispatcher_go:%d/%d\tconnetions:%d", ns.ReadBytes, ns.ReadCount,
ns.WriteBytes, ns.WriteCount, ns.DisPoolSize, ns.DisPoolCap, k.clientManager.ConnNum())
<-t.C
}
}()
}
//Setting the listener here is deprecated
//A default Listener is created automatically; just set the handler on each Binding when subscribing
func (k *kite) SetListener(listener IListener) {
k.listener = listener
}
func (k *kite) GetListener() IListener {
return k.listener
}
//Start
func (k *kite) Start() {
//Panic on start if no listener is set
if nil == k.listener {
panic("KiteClient Listener Not Set !")
}
//If this is the pre-release environment, append the pre-release suffix to the group id
if k.isPreEnv {
k.ga.GroupId = fmt.Sprintf("%s-pre", k.ga.GroupId)
}
//Reconnect manager
reconnManager := turbo.NewReconnectManager(true, 30*time.Second,
100, handshake)
k.clientManager = turbo.NewClientManager(reconnManager)
//Build the pipeline structure
pipeline := turbo.NewDefaultPipeline()
ackHandler := NewAckHandler("ack", k.clientManager)
accept := NewAcceptHandler("accept", k.listener)
remoting := turbo.NewRemotingHandler("remoting", k.clientManager)
//Use separate pools with different priorities for ack and accept events
msgPool := turbo.NewLimitPool(k.ctx, 50)
ackPool := turbo.NewLimitPool(k.ctx, 5)
storeAckPool := turbo.NewLimitPool(k.ctx, 5)
defaultPool := turbo.NewLimitPool(k.ctx, 5)
//pools
pools := make(map[uint8]*turbo.GPool)
pools[protocol.CMD_CONN_AUTH] = ackPool
pools[protocol.CMD_HEARTBEAT] = ackPool
pools[protocol.CMD_MESSAGE_STORE_ACK] = storeAckPool
pools[protocol.CMD_TX_ACK] = msgPool
pools[protocol.CMD_BYTES_MESSAGE] = msgPool
pools[protocol.CMD_STRING_MESSAGE] = msgPool
k.pools = pools
k.defaultPool = defaultPool
unmarshal := NewUnmarshalHandler("unmarshal",
pools,
defaultPool)
pipeline.RegisteHandler("unmarshal", unmarshal)
pipeline.RegisteHandler("ack", ackHandler)
pipeline.RegisteHandler("accept", accept)
pipeline.RegisteHandler("remoting", remoting)
k.pipeline = pipeline
//Register to watch kiteq server changes
k.registryCenter.RegisterWatcher(k)
hostname, _ := os.Hostname()
//Publish this host's topics to the registry
err := k.registryCenter.PublishTopics(k.topics, k.ga.GroupId, hostname)
if nil != err {
log.Errorf("kite|PublishTopics|FAIL|%s|%s", err, k.topics)
} else {
log.Infof("kite|PublishTopics|SUCC|%s", k.topics)
}
outter:
for _, b := range k.binds {
for _, t := range k.topics {
if t == b.Topic {
continue outter
}
}
k.topics = append | s, err := k.registryCenter.GetQServerAndWatch(topic)
if nil != err {
log.Errorf("kite|GetQServerAndWatch|FAIL|%s|%s", err, topic)
} else {
log.Infof("kite|GetQServerAndWatch|SUCC|%s|%s", topic, hosts)
}
k.OnQServerChanged(topic, hosts)
}
length := 0
k.topicToAddress.Range(func(key, value interface{}) bool {
length++
return true
})
if length <= 0 {
log.Errorf("kite|Start|NO VALID KITESERVER|%s", k.topics)
}
if !k.isPreEnv && len(k.binds) > 0 {
//Push the subscription bindings and pull the QServer list
err = k.registryCenter.PublishBindings(k.ga.GroupId, k.binds)
if nil != err {
log.Errorf("kite|PublishBindings|FAIL|%s|%v", err, k.binds)
}
}
if k.isPreEnv {
rawBinds, _ := json.Marshal(k.binds)
log.Infof("kite|PublishBindings|Ignored|[preEnv:%v]|%s...", k.isPreEnv, string(rawBinds))
}
//Start traffic statistics
k.remointflow()
go k.heartbeat()
go k.poolMonitor()
}
//poolMonitor
func (k *kite) poolMonitor() {
for {
select {
case <-k.ctx.Done():
break
default:
}
keys := make([]int, 0, len(k.pools))
for cmdType := range k.pools {
keys = append(keys, int(cmdType))
}
sort.Ints(keys)
str := fmt.Sprintf("Cmd-Pool\tGoroutines:%d\t", runtime.NumGoroutine())
for _, cmdType := range keys {
p := k.pools[uint8(cmdType)]
used, capsize := p.Monitor()
str += fmt.Sprintf("%s:%d/%d\t", protocol.NameOfCmd(uint8(cmdType)), used, capsize)
}
used, capsize := k.defaultPool.Monitor()
str += fmt.Sprintf("default:%d/%d\t", used, capsize)
log.Infof(str)
time.Sleep(1 * time.Second)
}
}
//Handler for the kiteq client
func (k *kite) fire(ctx *turbo.TContext) error {
p := ctx.Message
c := ctx.Client
event := turbo.NewPacketEvent(c, p)
err := k.pipeline.FireWork(event)
if nil != err {
log.Errorf("kite|onPacketReceive|FAIL|%s|%v", err, p)
return err
}
return nil
}
//Create the physical connection
func dial(hostport string) (*net.TCPConn, error) {
//Connect
remoteAddr, err_r := net.ResolveTCPAddr("tcp4", hostport)
if nil != err_r {
log.Errorf("kite|RECONNECT|RESOLVE ADDR |FAIL|remote:%s", err_r)
return nil, err_r
}
conn, err := net.DialTCP("tcp4", nil, remoteAddr)
if nil != err {
log.Errorf("kite|RECONNECT|%s|FAIL|%s", hostport, err)
return nil, err
}
return conn, nil
}
//Handshake packet
func handshake(ga *turbo.GroupAuth, remoteClient *turbo.TClient) (bool, error) {
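// Send CMD_CONN_META with the group credentials and wait for the auth ack;
// retry up to three times, sleeping two seconds between failed attempts.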
for i := 0; i < 3; i++ {
p := protocol.MarshalConnMeta(ga.GroupId, ga.SecretKey, int32(ga.WarmingupSec))
rpacket := turbo.NewPacket(protocol.CMD_CONN_META, p)
resp, err := remoteClient.WriteAndGet(*rpacket, 5*time.Second)
if nil != err {
//Retry after two seconds
time.Sleep(2 * time.Second)
log.Warnf("kiteIO|handShake|FAIL|%s|%s", ga.GroupId, err)
} else {
authAck, ok := resp.(*protocol.ConnAuthAck)
if !ok {
return false, errors.New("Unmatches Handshake Ack Type! ")
} else {
if authAck.GetStatus() {
log.Infof("kiteIO|handShake|SUCC|%s|%s", ga.GroupId, authAck.GetFeedback())
return true, nil
} else {
log.Warnf("kiteIO|handShake|FAIL|%s|%s", ga.GroupId, authAck.GetFeedback())
return false, errors.New("Auth FAIL![" + authAck.GetFeedback() + "]")
}
}
| (k.topics, b.Topic)
}
for _, topic := range k.topics {
host | conditional_block |
sms_fluxes.py | ((diffangle - np.floor( diffangle )) * 360.0) - 180
return diffangle
#################################################################################################
# Compute wind stress and direction
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def calc_wind(u10_wind,v10_wind):
import airsea
from metpy.calc import wind_direction
tau_x=airsea.windstress.stress(u10_wind,z=10,drag='largepond',rho_air=1.22,Ta=10.)
tau_y=airsea.windstress.stress(v10_wind,z=10,drag='largepond',rho_air=1.22,Ta=10.)
tx = np.where(u10_wind>0,tau_x,tau_x*-1) # Add directional signs back to wind stress vectors
ty = np.where(v10_wind>0,tau_y,tau_y*-1)
tau = np.sqrt(tx**2+ty**2)
[tau2,theta]=cart2pol(u10_wind,v10_wind) #to get winds oriented 0 for Easterly.
wind_dir=np.rad2deg(theta)
return tau, tx, ty, wind_dir
#############################################################
# Plot histogram of wind orientation
def plot_wind_orientation(wind_dir,plot_title):
normalise = (wind_dir+ 360) % 360 # First normalise winds from -180-180 to 0-360
ax = plt.subplot(111, projection='polar')
histogram, bins = np.histogram(np.deg2rad(normalise), bins=25)
bin_centers = 0.5*(bins[1:] + bins[:-1])
ax.bar(bin_centers, histogram,label="Wind Orientation",color='lightblue',bottom=0.0,alpha=0.8,edgecolor='tab:blue')
ax.grid(alpha=0.2)
ax.yaxis.get_major_locator().base.set_params(nbins=5)
ax.set_xlabel('{}'.format(plot_title))
return ax
#################################################################################################
# Interpolate winds to glider time step
def glider_compat(var_time,glider_time,var,time_raw=False):
if time_raw == False:
#convert time
var_time = np.int64(var_time)//10**9 * 10**9
sg_time = np.int64(glider_time)//10**9 * 10**9
#interp era5 time to sg time
var_interp = np.interp(sg_time,var_time,var.squeeze())
else:
sg_time = np.int64(glider_time)//10**9 * 10**9
#interp era5 time to sg time
var_interp = np.interp(sg_time,var_time,var.squeeze())
return var_interp
#################################################################################################
# Rotate winds to glider trajectory
def rotate_winds(wind_dir,glider_dir,tau_x,tau_y):
bearing1 = wind_dir
bearing0 = glider_dir
x = np.abs(tau_x)
y = np.abs(tau_y)
angle=np.ndarray(len(bearing1))
for k in range(len(bearing1)):
angle[k]=(calculateDifferenceBetweenAngles(bearing1[k],bearing0[k]))
angle=180-angle
theta=np.deg2rad(angle)
Rtx= x*np.cos(theta)+(y*np.sin(theta))
return Rtx,angle
#################################################################################################
# EQUIVALENT HEAT FLUX OF MIXED LAYER EDDIES
def calc_qmle(buoyancy_gradient_mld,mld,alpha,f=1e-4,cp=4000,po=1027,g=9.8):
"""
Calculates Qmle based on Fox-Kemper 2008
Strong lateral gradients provide a reservoir of potential energy which can be released by ageostrophic overturning circulations as a result of
ageostrophic baroclinic instabilities.
The restratification by ABI is slower than by Symmetric Instabilities but faster than mesoscale variations
Here, ABI is computed as an equivalent heat flux
"""
qmle=0.06*((buoyancy_gradient_mld**2*(mld)**2)/np.abs(f))*((cp*po)/(alpha*g))
return qmle
#################################################################################################
# EKMAN BUOYANCY FLUX
def calc_ebf(buoyancy_gradient_mld,wind_dir,glider_dir,tau_x,tau_y,f,alpha,cp=4000,g=9.8):
"""
Calculates the wind force on the ocean over fronts
Downfront windstress, or wind stress in the direction of the geostrophic shear, produces an Ekman transport that advects less buoyant water over
more buoyant water. An upfront windstress acts to restratify the mixed layer.
Theory from Thomas, 2005; Thompson et al., 2016
"""
rotated_wind_component,angle=rotate_winds(wind_dir,glider_dir,tau_x,tau_y)
ebf=(-(buoyancy_gradient_mld)*np.array((rotated_wind_component/f))*(cp/(alpha*g)))
return ebf, rotated_wind_component,angle
##################################################################################################
def calc_n2(SA,CT,rho,depth,alpha,g=9.8,po=1027,gsw=True):
"""
gsw implementation of n2,
decomposed into temp and salt components
"""
if gsw==True:
# From gsw package
import gsw
n2=gsw.Nsquared(SA,CT,depth,axis=0)
n2_t=(-g*alpha*np.diff(CT,axis=0)) # Temperature component
n2_s = n2-n2_t # Salt component
else:
# From buoyancy
by = g * (1-rho / po) # Buoyancy
n2 = np.diff(by*-1,axis=0)
n2_t=(-g*alpha[:-1,:]*np.diff(CT,axis=0)) # Temperature component
n2_s = n2-n2_t # Salt component
return n2,n2_t,n2_s
##################################################################################################
def calc_Lr(rho,mld,f,g=9.8,po=1027.):
"""
Calculates the internal Rossby radius of deformation
based on Timmermans and Winsor (2013)
Generally defined as Lr = NH/f
"""
n2ml=np.ndarray(len(rho[1,:-1]))
for i in range(len(rho[1,:-1])):
n2ml[i]=-(g/po)*((rho[15,i]-rho[np.int8(mld[i])+15,i])/mld[i])
Lr=(np.sqrt(n2ml)*mld[:-1])/f
return Lr
##################################################################################################
# Ertel Potential Vorticity
def calc_ertelPV(n2, bx, rel_vorticity, g=9.8,f=-1e-4):
"""
As in Thomas et al., 2013; Thompson et al., 2016
The assumption is made that flow is in geostrophic balance
PV can be used as a diagnostic to indicate the susceptibility of a flow to instability e.g., Hoskins, 1974; Thomas et al., 2008
When PV takes the opposite sign of f, the flow is inherently unstable (Thomas et al., 2013)
PV can be decomposed into two components:
qvert: The vertical component of absolute vorticity and vertical stratificaton (N2)
qbc: The horizontal relative vorticity (vertical shear) and M2, the horizontal buoyancy gradient
"""
# vertical component
qvert = (f+rel_vorticity)*n2
# baroclinic component
qbc = -bx**2/f
# Ertel PV
ertelPV = qvert + qbc
# If PV is unstable
fq = ertelPV*f # fq > 0 stable
return ertelPV, qvert, qbc, fq
##################################################################################################
# Compute Richardson and Rossby Number
def calc_ri(n2,bx,f):
"""
Calculates the Non-Dimensional modified Richardson Number
Thermal wind balance is assumed
Gives a measure of vertical shear
Ri characterises the dynamic regime and can be interpreted as the steepness of isopycnal slopes relative to N/f.
Ri >> 1: QG regime, steepness of isopycnal slope is small
Ri ~ 1 (<4), Ageostrophic regime, steepness of isopycnal slope is large
Also used to characterise instabilities as in Thomas et al., 2013
"""
Ri = (f**2*n2)/np.abs(bx)**2
modRi = np.arctan(-Ri**-1)
return Ri, modRi
def calc_ro | (f,vort | identifier_name |
|
sms_fluxes.py | _, alpha, beta = gsw.rho_alpha_beta(SA,CT,y)
return alpha,beta
#################################################################################################
# Buoyancy and buoyancy gradient
def calc_buoyancy(density,SA,CT,alpha,beta,mld,dx=1000,po=1027,g=9.8):
"""
Calculates buoyancy, buoyancy gradient, mean buoyancy gradient in the mixed layer
"""
by = g * (1-density / po) # Buoyancy
by_S = g * (1-beta*SA/po)
by_T = g * (1-alpha*CT/po)
#Raw buoyancy gradient
bgrad = (np.diff(by,axis=1)/dx)
#Buoyancy gradient in the middle of the mixed layer
bxml=np.ndarray(len(density[1,:-1]))
for i in range(len(density[1,:-1])):
|
return by,by_S,by_T,bgrad,bxml
#################################################################################################
# Glider Trajectory
def calculate_initial_compass_bearing(pointA, pointB):
"""
Calculates the bearing between two points.
The formula used is the following:
θ = atan2(sin(Δlong).cos(lat2),
cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))
:Parameters:
- `pointA: The tuple representing the latitude/longitude for the
first point. Latitude and longitude must be in decimal degrees
- `pointB: The tuple representing the latitude/longitude for the
second point. Latitude and longitude must be in decimal degrees
:Returns:
The bearing in degrees
:Returns Type:
float
"""
import math
if (type(pointA) != tuple) or (type(pointB) != tuple):
raise TypeError("Only tuples are supported as arguments")
lat1 = math.radians(pointA[0])
lat2 = math.radians(pointB[0])
diffLong = math.radians(pointB[1] - pointA[1])
x = math.sin(diffLong) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)
* math.cos(lat2) * math.cos(diffLong))
initial_bearing = math.atan2(y, x)
# initial_bearing = np.atan2(x, y)
# Now we have the initial bearing but math.atan2 return values
# from -180° to + 180° which is not what we want for a compass bearing
# The solution is to normalize the initial bearing as shown below
initial_bearing = math.degrees(initial_bearing)
#compass_bearing = (initial_bearing + 360) % 360
return initial_bearing
#def glider_bearing(lon,lat):
# import math
# lon=np.deg2rad(lon)
## lat=np.deg2rad(lat)
# bearing = np.ndarray(len(lon[:-1]))
#bearing=[]#
# for i in range(len(lon[:-1])):
# dlon=np.deg2rad(lon[i+1]-lon[i])
# deltaX = math.cos(lat[i])*math.sin(lat[i+1])-math.sin(lat[i])*math.cos(lat[i+1])*math.cos(dlon)
# deltaY = math.sin(dlon) * math.cos(lat[i+1])
#convert to degrees
# bearing[i]=(math.atan2(deltaX, deltaY))* (180/math.pi) # Compute such that 0 degrees is east
# return bearing
#normalize to compass headings
#bearing = (bearing + 180) % 360
def calc_glider_traj(lat,lon):
bearing=[]
for i in range(len(lon[:-1])):
bearing.append(calculate_initial_compass_bearing((lat[i],lon[i]),(lat[i+1],lon[i+1])))
#bearing=np.array(bearing)
return bearing
#################################################################################################
# Function to compute the difference between two angles
def calculateDifferenceBetweenAngles(bearing1, bearing0):
norm1 = (bearing1 + 360) % 360
norm2 = (bearing0 + 360) % 360
# norm1 = bearing1
# norm2=bearing0
if (norm1)<(norm2):
diffangle = (norm1 - norm2) + 180
diffangle = (diffangle / 180.0)
diffangle = ((diffangle - np.floor( diffangle )) * 360.0) - 180
else:
diffangle = (norm2 - norm1) + 180
diffangle = (diffangle / 180.0)
diffangle = ((diffangle - np.floor( diffangle )) * 360.0) - 180
return diffangle
#################################################################################################
# Compute wind stress and direction
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def calc_wind(u10_wind,v10_wind):
import airsea
from metpy.calc import wind_direction
tau_x=airsea.windstress.stress(u10_wind,z=10,drag='largepond',rho_air=1.22,Ta=10.)
tau_y=airsea.windstress.stress(v10_wind,z=10,drag='largepond',rho_air=1.22,Ta=10.)
tx = np.where(u10_wind>0,tau_x,tau_x*-1) # Add directional signs back to wind stress vectors
ty = np.where(v10_wind>0,tau_y,tau_y*-1)
tau = np.sqrt(tx**2+ty**2)
[tau2,theta]=cart2pol(u10_wind,v10_wind) #to get winds oriented 0 for Easterly.
wind_dir=np.rad2deg(theta)
return tau, tx, ty, wind_dir
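# --- illustrative sketch (not part of the original module) ----------------
# Minimal call of calc_wind on synthetic 10 m winds.  This assumes the
# airsea package imported inside calc_wind is installed; the values are
# arbitrary and only show the expected input/output shapes.
def _demo_calc_wind():
    u10 = np.array([5.0, 3.0, 0.5])   # eastward wind component [m s-1]
    v10 = np.array([0.0, 4.0, 7.0])   # northward wind component [m s-1]
    tau, tx, ty, wind_dir = calc_wind(u10, v10)
    print("stress magnitude [N m-2]:", np.round(tau, 3))
    print("wind direction [deg]:", np.round(wind_dir, 1))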
#############################################################
# Plot histogram of wind orientation
def plot_wind_orientation(wind_dir,plot_title):
normalise = (wind_dir+ 360) % 360 # First normalise winds from -180-180 to 0-360
ax = plt.subplot(111, projection='polar')
histogram, bins = np.histogram(np.deg2rad(normalise), bins=25)
bin_centers = 0.5*(bins[1:] + bins[:-1])
ax.bar(bin_centers, histogram,label="Wind Orientation",color='lightblue',bottom=0.0,alpha=0.8,edgecolor='tab:blue')
ax.grid(alpha=0.2)
ax.yaxis.get_major_locator().base.set_params(nbins=5)
ax.set_xlabel('{}'.format(plot_title))
return ax
#################################################################################################
# Interpolate winds to glider time step
def glider_compat(var_time,glider_time,var,time_raw=False):
if time_raw == False:
#convert time
var_time = np.int64(var_time)//10**9 * 10**9
sg_time = np.int64(glider_time)//10**9 * 10**9
#interp era5 time to sg time
var_interp = np.interp(sg_time,var_time,var.squeeze())
else:
sg_time = np.int64(glider_time)//10**9 * 10**9
#interp era5 time to sg time
var_interp = np.interp(sg_time,var_time,var.squeeze())
return var_interp
#################################################################################################
# Rotate winds to glider trajectory
def rotate_winds(wind_dir,glider_dir,tau_x,tau_y):
bearing1 = wind_dir
bearing0 = glider_dir
x = np.abs(tau_x)
y = np.abs(tau_y)
angle=np.ndarray(len(bearing1))
for k in range(len(bearing1)):
angle[k]=(calculateDifferenceBetweenAngles(bearing1[k],bearing0[k]))
angle=180-angle
theta=np.deg2rad(angle)
Rtx= x*np.cos(theta)+(y*np.sin(theta))
return Rtx,angle
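# --- illustrative sketch (not part of the original module) ----------------
# Minimal call of rotate_winds showing the expected inputs: equal-length
# 1-D arrays of wind direction, glider bearing and the two wind stress
# components.  The numbers are arbitrary.
def _demo_rotate_winds():
    wind_dir = np.array([30.0])      # wind direction [deg]
    glider_dir = np.array([120.0])   # glider bearing [deg]
    tau_x = np.array([0.1])          # zonal wind stress [N m-2]
    tau_y = np.array([0.02])         # meridional wind stress [N m-2]
    Rtx, angle = rotate_winds(wind_dir, glider_dir, tau_x, tau_y)
    print("wind/track angle [deg]:", angle, " along-track stress [N m-2]:", Rtx)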
#################################################################################################
# EQUIVALENT HEAT FLUX OF MIXED LAYER EDDIES
def calc_qmle(buoyancy_gradient_mld,mld,alpha,f=1e-4,cp=4000,po=1027,g=9.8):
"""
Calculates Qmle based on Fox-Kemper 2008
Strong lateral gradients provide a resevoir of potential energy which can be released b ageostrophic overturning circulations as a result of
ageostrophic baroclinic instabilities.
The restratificatoin by ABI is slower than by Symmetric Instabilities but faster than mesoscale variations
Here, ABI is computed as an equivalent heat flux
"""
qmle=0.06*(( | bxml[i]=(np.nanmean(bgrad[:np.int8(mld[i])-15,i],0)) | conditional_block |
sms_fluxes.py |
# Alternative mld based on n2
def cal_mldn2(density,depth,n2,ref_depth=10,threshold=0.05):
mld=np.ndarray(len(density[1,:]))
for i in range(len(density[1,:])):
try:
drange = (depth[(np.abs((density[:,i]-density[ref_depth,i ]))>=threshold)].min())
mld[i]=depth[depth<=drange][n2[depth<=drange,i]==np.nanmax(n2[depth<=drange,i])]#
except ValueError: #raised if `y` is empty.
mld[i]=(np.nan)
return mld
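# --- illustrative sketch (not part of the original module) ----------------
# cal_mldn2 expects (depth, time) arrays; this synthetic two-profile case
# with a density jump near 60 m only illustrates the expected shapes and
# the N^2-maximum criterion.  Values are arbitrary.
def _demo_cal_mldn2():
    depth = np.arange(0, 200.0)                              # 1 m bins
    density = np.tile(1026.0 + 0.001 * depth, (2, 1)).T      # (depth, time)
    density[60:, :] += 0.5                                   # pycnocline jump
    n2 = np.diff(density, axis=0, prepend=density[:1, :])    # crude N^2 proxy
    print("estimated MLD [m]:", cal_mldn2(density, depth, n2))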
#################################################################################################
# thermal expansion co-efficient, saline contraction coefficient
def alphabeta(SA,CT,depth):
import gsw as gsw
_,y=np.meshgrid(SA[1,:],depth)
_, alpha, beta = gsw.rho_alpha_beta(SA,CT,y)
return alpha,beta
#################################################################################################
# Buoyancy and buoyancy gradient
def calc_buoyancy(density,SA,CT,alpha,beta,mld,dx=1000,po=1027,g=9.8):
"""
Calculates buoyancy, buoyancy gradient, mean buoyancy gradient in the mixed layer
"""
by = g * (1-density / po) # Buoyancy
by_S = g * (1-beta*SA/po)
by_T = g * (1-alpha*CT/po)
#Raw buoyancy gradient
bgrad = (np.diff(by,axis=1)/dx)
#Buoyancy gradient in the middle of the mixed layer
bxml=np.ndarray(len(density[1,:-1]))
for i in range(len(density[1,:-1])):
bxml[i]=(np.nanmean(bgrad[:np.int8(mld[i])-15,i],0))
return by,by_S,by_T,bgrad,bxml
#################################################################################################
# Glider Trajectory
def calculate_initial_compass_bearing(pointA, pointB):
"""
Calculates the bearing between two points.
    The formula used is the following:
θ = atan2(sin(Δlong).cos(lat2),
cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))
:Parameters:
- `pointA: The tuple representing the latitude/longitude for the
first point. Latitude and longitude must be in decimal degrees
- `pointB: The tuple representing the latitude/longitude for the
second point. Latitude and longitude must be in decimal degrees
:Returns:
The bearing in degrees
:Returns Type:
float
"""
import math
if (type(pointA) != tuple) or (type(pointB) != tuple):
raise TypeError("Only tuples are supported as arguments")
lat1 = math.radians(pointA[0])
lat2 = math.radians(pointB[0])
diffLong = math.radians(pointB[1] - pointA[1])
x = math.sin(diffLong) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)
* math.cos(lat2) * math.cos(diffLong))
initial_bearing = math.atan2(y, x)
# initial_bearing = np.atan2(x, y)
# Now we have the initial bearing but math.atan2 return values
# from -180° to + 180° which is not what we want for a compass bearing
# The solution is to normalize the initial bearing as shown below
initial_bearing = math.degrees(initial_bearing)
#compass_bearing = (initial_bearing + 360) % 360
return initial_bearing
#def glider_bearing(lon,lat):
# import math
# lon=np.deg2rad(lon)
## lat=np.deg2rad(lat)
# bearing = np.ndarray(len(lon[:-1]))
#bearing=[]#
# for i in range(len(lon[:-1])):
# dlon=np.deg2rad(lon[i+1]-lon[i])
# deltaX = math.cos(lat[i])*math.sin(lat[i+1])-math.sin(lat[i])*math.cos(lat[i+1])*math.cos(dlon)
# deltaY = math.sin(dlon) * math.cos(lat[i+1])
#convert to degrees
# bearing[i]=(math.atan2(deltaX, deltaY))* (180/math.pi) # Compute such that 0 degrees is east
# return bearing
#normalize to compass headings
#bearing = (bearing + 180) % 360
def calc_glider_traj(lat,lon):
bearing=[]
for i in range(len(lon[:-1])):
bearing.append(calculate_initial_compass_bearing((lat[i],lon[i]),(lat[i+1],lon[i+1])))
#bearing=np.array(bearing)
return bearing
#################################################################################################
# Function to compute the difference between two angles
def calculateDifferenceBetweenAngles(bearing1, bearing0):
norm1 = (bearing1 + 360) % 360
norm2 = (bearing0 + 360) % 360
# norm1 = bearing1
# norm2=bearing0
if (norm1)<(norm2):
diffangle = (norm1 - norm2) + 180
diffangle = (diffangle / 180.0)
diffangle = ((diffangle - np.floor( diffangle )) * 360.0) - 180
else:
diffangle = (norm2 - norm1) + 180
diffangle = (diffangle / 180.0)
diffangle = ((diffangle - np.floor( diffangle )) * 360.0) - 180
return diffangle
#################################################################################################
# Compute wind stress and direction
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def calc_wind(u10_wind,v10_wind):
import airsea
from metpy.calc import wind_direction
tau_x=airsea.windstress.stress(u10_wind,z=10,drag='largepond',rho_air=1.22,Ta=10.)
tau_y=airsea.windstress.stress(v10_wind,z=10,drag='largepond',rho_air=1.22,Ta=10.)
tx = np.where(u10_wind>0,tau_x,tau_x*-1) # Add directional signs back to wind stress vectors
ty = np.where(v10_wind>0,tau_y,tau_y*-1)
tau = np.sqrt(tx**2+ty**2)
[tau2,theta]=cart2pol(u10_wind,v10_wind) #to get winds oriented 0 for Easterly.
wind_dir=np.rad2deg(theta)
return tau, tx, ty, wind_dir
#############################################################
# Plot histogram of wind orientation
def plot_wind_orientation(wind_dir,plot_title):
normalise = (wind_dir+ 360) % 360 # First normalise winds from -180-180 to 0-360
ax = plt.subplot(111, projection='polar')
histogram, bins = np.histogram(np.deg2rad(normalise), bins=25)
bin_centers = 0.5*(bins[1:] + bins[:-1])
ax.bar(bin_centers, histogram,label="Wind Orientation",color='lightblue',bottom=0.0,alpha=0.8,edgecolor='tab:blue')
ax.grid(alpha=0.2)
ax.yaxis.get_major_locator().base.set_params(nbins=5)
ax.set_xlabel('{}'.format(plot_title))
return ax
#################################################################################################
# Interpolate winds to glider time step
def glider_compat(var_time,glider_time,var,time_raw=False):
if time_raw == False:
#convert time
var_time = np.int64(var_time)//10**9 * 10**9
sg_time = np.int64(glider_time)//10**9 * 10**9
#interp era5 time to sg time
var_interp = np.interp(sg_time,var_time,var.squeeze())
else:
sg_time = np.int64(glider_time)//10**9 * 10**9
#interp era5 time to sg time
var_interp = np.interp(sg_time,var_time,var.squeeze())
return var_interp
#################################################################################################
# Rotate winds to glider trajectory
def rotate_winds(wind_dir,glider_dir,tau_x,tau_y):
bearing1 = wind_dir
bearing0 = glider_dir
| for i in range(len(density[1,:])):
try: mld[i]=(depth[(np.abs((density[:,i]-density[ref_depth,i ]))>=threshold)].min())
except ValueError: #raised if `y` is empty.
mld[i]=(np.nan)
return mld | random_line_split |
|
sms_fluxes.py | buoyancy gradient
def calc_buoyancy(density,SA,CT,alpha,beta,mld,dx=1000,po=1027,g=9.8):
"""
Calculates buoyancy, buoyancy gradient, mean buoyancy gradient in the mixed layer
"""
by = g * (1-density / po) # Buoyancy
by_S = g * (1-beta*SA/po)
by_T = g * (1-alpha*CT/po)
#Raw buoyancy gradient
bgrad = (np.diff(by,axis=1)/dx)
#Buoyancy gradient in the middle of the mixed layer
bxml=np.ndarray(len(density[1,:-1]))
for i in range(len(density[1,:-1])):
bxml[i]=(np.nanmean(bgrad[:np.int8(mld[i])-15,i],0))
return by,by_S,by_T,bgrad,bxml
#################################################################################################
# Glider Trajectory
def calculate_initial_compass_bearing(pointA, pointB):
"""
Calculates the bearing between two points.
The formulae used is the following:
θ = atan2(sin(Δlong).cos(lat2),
cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))
:Parameters:
- `pointA: The tuple representing the latitude/longitude for the
first point. Latitude and longitude must be in decimal degrees
- `pointB: The tuple representing the latitude/longitude for the
second point. Latitude and longitude must be in decimal degrees
:Returns:
The bearing in degrees
:Returns Type:
float
"""
import math
if (type(pointA) != tuple) or (type(pointB) != tuple):
raise TypeError("Only tuples are supported as arguments")
lat1 = math.radians(pointA[0])
lat2 = math.radians(pointB[0])
diffLong = math.radians(pointB[1] - pointA[1])
x = math.sin(diffLong) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)
* math.cos(lat2) * math.cos(diffLong))
initial_bearing = math.atan2(y, x)
# initial_bearing = np.atan2(x, y)
# Now we have the initial bearing but math.atan2 return values
# from -180° to + 180° which is not what we want for a compass bearing
# The solution is to normalize the initial bearing as shown below
initial_bearing = math.degrees(initial_bearing)
#compass_bearing = (initial_bearing + 360) % 360
return initial_bearing
#def glider_bearing(lon,lat):
# import math
# lon=np.deg2rad(lon)
## lat=np.deg2rad(lat)
# bearing = np.ndarray(len(lon[:-1]))
#bearing=[]#
# for i in range(len(lon[:-1])):
# dlon=np.deg2rad(lon[i+1]-lon[i])
# deltaX = math.cos(lat[i])*math.sin(lat[i+1])-math.sin(lat[i])*math.cos(lat[i+1])*math.cos(dlon)
# deltaY = math.sin(dlon) * math.cos(lat[i+1])
#convert to degrees
# bearing[i]=(math.atan2(deltaX, deltaY))* (180/math.pi) # Compute such that 0 degrees is east
# return bearing
#normalize to compass headings
#bearing = (bearing + 180) % 360
def calc_glider_traj(lat,lon):
bearing=[]
for i in range(len(lon[:-1])):
bearing.append(calculate_initial_compass_bearing((lat[i],lon[i]),(lat[i+1],lon[i+1])))
#bearing=np.array(bearing)
return bearing
#################################################################################################
# Function to compute the difference between two angles
def calculateDifferenceBetweenAngles(bearing1, bearing0):
norm1 = (bearing1 + 360) % 360
norm2 = (bearing0 + 360) % 360
# norm1 = bearing1
# norm2=bearing0
if (norm1)<(norm2):
diffangle = (norm1 - norm2) + 180
diffangle = (diffangle / 180.0)
diffangle = ((diffangle - np.floor( diffangle )) * 360.0) - 180
else:
diffangle = (norm2 - norm1) + 180
diffangle = (diffangle / 180.0)
diffangle = ((diffangle - np.floor( diffangle )) * 360.0) - 180
return diffangle
#################################################################################################
# Compute wind stress and direction
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def calc_wind(u10_wind,v10_wind):
import airsea
from metpy.calc import wind_direction
tau_x=airsea.windstress.stress(u10_wind,z=10,drag='largepond',rho_air=1.22,Ta=10.)
tau_y=airsea.windstress.stress(v10_wind,z=10,drag='largepond',rho_air=1.22,Ta=10.)
tx = np.where(u10_wind>0,tau_x,tau_x*-1) # Add directional signs back to wind stress vectors
ty = np.where(v10_wind>0,tau_y,tau_y*-1)
tau = np.sqrt(tx**2+ty**2)
[tau2,theta]=cart2pol(u10_wind,v10_wind) #to get winds oriented 0 for Easterly.
wind_dir=np.rad2deg(theta)
return tau, tx, ty, wind_dir
#############################################################
# Plot histogram of wind orientation
def plot_wind_orientation(wind_dir,plot_title):
normalise = (wind_dir+ 360) % 360 # First normalise winds from -180-180 to 0-360
ax = plt.subplot(111, projection='polar')
histogram, bins = np.histogram(np.deg2rad(normalise), bins=25)
bin_centers = 0.5*(bins[1:] + bins[:-1])
ax.bar(bin_centers, histogram,label="Wind Orientation",color='lightblue',bottom=0.0,alpha=0.8,edgecolor='tab:blue')
ax.grid(alpha=0.2)
ax.yaxis.get_major_locator().base.set_params(nbins=5)
ax.set_xlabel('{}'.format(plot_title))
return ax
#################################################################################################
# Interpolate winds to glider time step
def glider_compat(var_time,glider_time,var,time_raw=False):
if time_raw == False:
#convert time
var_time = np.int64(var_time)//10**9 * 10**9
sg_time = np.int64(glider_time)//10**9 * 10**9
#interp era5 time to sg time
var_interp = np.interp(sg_time,var_time,var.squeeze())
else:
sg_time = np.int64(glider_time)//10**9 * 10**9
#interp era5 time to sg time
var_interp = np.interp(sg_time,var_time,var.squeeze())
return var_interp
#################################################################################################
# Rotate winds to glider trajectory
def rotate_winds(wind_dir,glider_dir,tau_x,tau_y):
bearing1 = wind_dir
bearing0 = glider_dir
x = np.abs(tau_x)
y = np.abs(tau_y)
angle=np.ndarray(len(bearing1))
for k in range(len(bearing1)):
angle[k]=(calculateDifferenceBetweenAngles(bearing1[k],bearing0[k]))
angle=180-angle
theta=np.deg2rad(angle)
Rtx= x*np.cos(theta)+(y*np.sin(theta))
return Rtx,angle
#################################################################################################
# EQUIVALENT HEAT FLUX OF MIXED LAYER EDDIES
def calc_qmle(buoyancy_gradient_mld,mld,alpha,f=1e-4,cp=4000,po=1027,g=9.8):
"""
| Calculates Qmle based on Fox-Kemper 2008
    Strong lateral gradients provide a reservoir of potential energy which can be released by ageostrophic overturning circulations as a result of
ageostrophic baroclinic instabilities.
    The restratification by ABI is slower than by Symmetric Instabilities but faster than mesoscale variations
Here, ABI is computed as an equivalent heat flux
"""
qmle=0.06*((buoyancy_gradient_mld**2*(mld)**2)/np.abs(f))*((cp*po)/(alpha*g))
return qmle
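# --- illustrative sketch (not part of the original module) ----------------
# Order-of-magnitude check of the Fox-Kemper (2008) equivalent heat flux
# computed above, using representative (made-up) values: |b_x| ~ 1e-7 s-2,
# MLD ~ 100 m, alpha ~ 1e-4 K-1 and |f| ~ 1e-4 s-1.
def _demo_qmle_scale():
    q = calc_qmle(buoyancy_gradient_mld=1e-7, mld=100.0, alpha=1e-4, f=-1e-4)
    print(f"equivalent heat flux Qmle ~ {q:.0f} W m-2")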
##### | identifier_body |
|
Fetch.ts | then throw error
if (this._chainScope.setTo('catch')) {
throw e;
} else {
// If no next catch exists, throw error to our safe scope
this._chainScope.callError(e);
}
}
}), this._chainScope);
}
public catch<PromiseResult>(fnc: (e: Error) => PromiseResult): PromiseWraper<T | PromiseResult> {
// push to chain scope;
this._chainScope.chain.push('catch');
return new PromiseWraper(this._promise.catch( (e: Error): PromiseResult => {
// pop from chain scope
this._chainScope.setTo('catch');
this._chainScope.chain.shift();
try {
return fnc(e);
} catch (e) {
// go to next catch in chain, if exists and then throw error
if (this._chainScope.setTo('catch')) {
throw e;
} else {
// If no next catch exists, throw error to our safe scope
this._chainScope.callError(e);
}
}
}), this._chainScope);
}
}
/**
*
* Constants
*
*/
export type RequestMethod = 'GET' | 'POST' | 'PUT' | 'DELETE' | 'OPTIONS' | 'HEAD';
export enum ResponseStatus {
CONTINUE = 100,
SWITCHING = 101,
PROCESSING = 102,
OK = 200,
CREATED = 201,
ACCEPTED = 202,
NON_AUTHORITATIVE = 203,
NO_CONTENT = 204,
RESET_CONTENT = 205,
PARTIAL_CONTENT = 206,
MULTI_STATUS = 207,
MULTIPLE_CHOISES = 300,
MOVED_PERMANENTLY = 301,
FOUND = 302,
SEE_OTHER = 303,
NOT_MODIFIED = 304,
USE_PROXY = 305,
SWITCH_PROXY = 306,
TEMPORARY_REDIRECT = 307,
BAD_REQUEST = 400,
UNAUTHORIZED = 401,
PAYMENT_REQUIRED = 402,
FORBIDDEN = 403,
NOT_FOUND = 404,
METHOD_NOT_ALLOWED = 405,
NOT_ACCEPTABLE = 406,
AUTHENTICATION_REQUIRED = 407,
REQUEST_TIMEOUT = 408,
CONFLICT = 409,
GONE = 410,
LENGTH_REQUIRED = 411,
PRECONDITION_FAILED = 412,
ENTITY_TOO_LARGE = 413,
URI_TOO_LONG = 414,
UNSOPPORTED_MEDIA_TYPE = 415,
REQUEST_RANGE_NOT_SATISFIABLE = 416,
EXPECTATION_FAILED = 417,
IAM_TEAPOD = 418,
UNPROCESSABLE_ENTITY = 422,
LOCKED = 423,
FAILED_DEPENDENCY = 424,
UNORDERED_COLLECTION = 425,
UPGRADE_REQUIRED = 426,
RETRY_WITH = 449,
BLOCKED_BY_WINDOWS_PARENTAL_CONTROLS = 450,
UNAVAILABLE_FOR_LEGAL_REASON = 451,
CLIENT_CLOSED_REQUEST = 499,
INTERNAL_SERVER_ERROR = 500,
NOT_IMPLEMENTED = 501,
BAD_GATEWAY = 502,
SERVICE_UNAVAILABLE = 503,
GATEWAY_TIMEOUT = 504,
HTTP_VERSION_NOT_SUPPORTED = 505,
VARIANT_ALSO_NEGOTIATES = 506,
INSUFFIACIENT_STORAGE = 507,
BANDWIDTH_LIMIT_EXCEEDED = 509,
NOT_EXTENDED = 510
}
/**
*
* Definition of protocol for communication between this service and proxy
*
*/
export const RequestDefKeys = ['url', 'method', 'headers', 'body', 'timeout', 'max_redirects', 'reject_unauthorized', 'follow_redirect', 'auth_user', 'auth_pass'];
export interface RequestDef {
url: string;
method: string;
headers: {[key: string]: string};
body?: any;
timeout?: number;
max_redirects?: number;
reject_unauthorized?: boolean;
follow_redirect?: boolean;
auth_user?: string;
auth_pass?: string;
};
export interface ResponseDef {
status_code: number;
headers: {[key: string]: string};
body: any;
error_type?: string;
error_code?: string;
error_message?: string;
};
/**
*
* Error in communication with proxy
*
*/
export class ProxyCommunicationError extends Error {
message: string;
type: string;
constructor(type: string, message: string) {
super(message);
this.name = 'ProxyCommunicationError';
this.message = message;
this.type = type;
(<any>this).__proto__ = ProxyCommunicationError.prototype;
}
}
/**
*
* Error in communication with target server
*
*/
export class FetchError extends Error {
message: string;
code: string;
constructor(code: string, message: string) {
super(message);
this.name = 'FetchError';
this.message = message;
this.code = code;
(<any>this).__proto__ = ProxyCommunicationError.prototype;
}
}
/**
*
* Static helper for executing fetch request to our proxy server
*
*/
export class FetchExecutor {
/**
*
* Utilities
*
*/
public static b64EncodeUnicode(str: string) {
return btoaFunction(encodeURIComponent(str).replace(/%([0-9A-F]{2})/g, function(match, p1) {
return String.fromCharCode(<any>('0x' + p1));
}));
}
public static b64DecodeUnicode(str) {
return decodeURIComponent(atobFunction(str).split('').map(function(c) {
return '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2);
}).join(''));
}
public static copyRequestDef(input: RequestDef, defaultHeaders?: {[key: string]: string}): RequestDef {
let def: RequestDef = <any>{};
for (let i = 0; i < RequestDefKeys.length; i++) {
if (input[RequestDefKeys[i]]) {
def[RequestDefKeys[i]] = input[RequestDefKeys[i]];
}
}
// prepare headers
if (!def.headers) {
if (defaultHeaders) {
def.headers = defaultHeaders;
} else {
def.headers = {};
}
}
return def;
}
public static headersToLower(headers: {[key: string]: string}): {[key: string]: string} {
let headersCopy = {};
for (let i in headers) {
if (headers.hasOwnProperty(i)) {
headersCopy[i.toLowerCase()] = headers[i];
}
}
return headersCopy;
}
/**
*
     * Fetch request that will be sent to the proxy server
*
     * It will be parsed from JSON and base64 and you will get a promise that looks like a normal fetch from the browser,
     * but it's not! You will get a FetchResponse object, which is not the same as the standard Response object.
*
*/
static fetch(machine: Machine, params: RequestDef, additionalParams?: {[key: string]: string}, proxyUrl?: string): PromiseWraper<FetchResponse> {
let paramsCopy = FetchExecutor.copyRequestDef(params);
// prepare headers
if (paramsCopy.headers) {
paramsCopy.headers = FetchExecutor.headersToLower(paramsCopy.headers);
}
// prepare user request payload
if (params.body && typeof params.body === 'object') {
paramsCopy.body = JSON.stringify(params.body);
}
// prepare payload as base64
if (paramsCopy.body) {
paramsCopy.body = FetchExecutor.b64EncodeUnicode(paramsCopy.body);
}
if (additionalParams) {
for (let i in additionalParams) {
if (additionalParams.hasOwnProperty(i)) {
paramsCopy[i] = additionalParams[i];
}
}
}
// paramsCopy['auth_token'] = ''; // TODO auth token from controller
// prepare fetch configuration
let fetchParams = {
method: 'POST',
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cache': 'no-cache'
},
body: JSON.stringify(paramsCopy)
};
// status of request
let status: number = null;
const proxyServerUrl = proxyUrl ? proxyUrl : FetchRequest.PROXY_SERVER_URL;
// do it!, post our request to proxy
const ret = fetchFunction(proxyServerUrl, fetchParams).then((response: Response) => {
status = response.status;
return <ResponseDef><any>response.json();
}).then((response: ResponseDef) => {
// request to proxy failed
if (response.error_type && response.error_message) {
throw new ProxyCommunicationError(response.error_type, response.error_message);
}
// checking status between proxy and service
if (status === ResponseStatus.OK) | {
return response;
} | conditional_block |
|
Fetch.ts | 2,
LOCKED = 423,
FAILED_DEPENDENCY = 424,
UNORDERED_COLLECTION = 425,
UPGRADE_REQUIRED = 426,
RETRY_WITH = 449,
BLOCKED_BY_WINDOWS_PARENTAL_CONTROLS = 450,
UNAVAILABLE_FOR_LEGAL_REASON = 451,
CLIENT_CLOSED_REQUEST = 499,
INTERNAL_SERVER_ERROR = 500,
NOT_IMPLEMENTED = 501,
BAD_GATEWAY = 502,
SERVICE_UNAVAILABLE = 503,
GATEWAY_TIMEOUT = 504,
HTTP_VERSION_NOT_SUPPORTED = 505,
VARIANT_ALSO_NEGOTIATES = 506,
INSUFFIACIENT_STORAGE = 507,
BANDWIDTH_LIMIT_EXCEEDED = 509,
NOT_EXTENDED = 510
}
/**
*
* Definition of protocol for communication between this service and proxy
*
*/
export const RequestDefKeys = ['url', 'method', 'headers', 'body', 'timeout', 'max_redirects', 'reject_unauthorized', 'follow_redirect', 'auth_user', 'auth_pass'];
export interface RequestDef {
url: string;
method: string;
headers: {[key: string]: string};
body?: any;
timeout?: number;
max_redirects?: number;
reject_unauthorized?: boolean;
follow_redirect?: boolean;
auth_user?: string;
auth_pass?: string;
};
export interface ResponseDef {
status_code: number;
headers: {[key: string]: string};
body: any;
error_type?: string;
error_code?: string;
error_message?: string;
};
/**
*
* Error in communication with proxy
*
*/
export class ProxyCommunicationError extends Error {
message: string;
type: string;
constructor(type: string, message: string) {
super(message);
this.name = 'ProxyCommunicationError';
this.message = message;
this.type = type;
(<any>this).__proto__ = ProxyCommunicationError.prototype;
}
}
/**
*
* Error in communication with target server
*
*/
export class FetchError extends Error {
message: string;
code: string;
constructor(code: string, message: string) {
super(message);
this.name = 'FetchError';
this.message = message;
this.code = code;
(<any>this).__proto__ = ProxyCommunicationError.prototype;
}
}
/**
*
* Static helper for executing fetch request to our proxy server
*
*/
export class FetchExecutor {
/**
*
* Utilities
*
*/
public static b64EncodeUnicode(str: string) {
return btoaFunction(encodeURIComponent(str).replace(/%([0-9A-F]{2})/g, function(match, p1) {
return String.fromCharCode(<any>('0x' + p1));
}));
}
public static b64DecodeUnicode(str) {
return decodeURIComponent(atobFunction(str).split('').map(function(c) {
return '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2);
}).join(''));
}
public static copyRequestDef(input: RequestDef, defaultHeaders?: {[key: string]: string}): RequestDef {
let def: RequestDef = <any>{};
for (let i = 0; i < RequestDefKeys.length; i++) {
if (input[RequestDefKeys[i]]) {
def[RequestDefKeys[i]] = input[RequestDefKeys[i]];
}
}
// prepare headers
if (!def.headers) {
if (defaultHeaders) {
def.headers = defaultHeaders;
} else {
def.headers = {};
}
}
return def;
}
public static headersToLower(headers: {[key: string]: string}): {[key: string]: string} {
let headersCopy = {};
for (let i in headers) {
if (headers.hasOwnProperty(i)) {
headersCopy[i.toLowerCase()] = headers[i];
}
}
return headersCopy;
}
/**
*
     * Fetch request that will be sent to the proxy server
*
     * It will be parsed from JSON and base64 and you will get a promise that looks like a normal fetch from the browser,
     * but it's not! You will get a FetchResponse object, which is not the same as the standard Response object.
*
*/
static fetch(machine: Machine, params: RequestDef, additionalParams?: {[key: string]: string}, proxyUrl?: string): PromiseWraper<FetchResponse> {
let paramsCopy = FetchExecutor.copyRequestDef(params);
// prepare headers
if (paramsCopy.headers) {
paramsCopy.headers = FetchExecutor.headersToLower(paramsCopy.headers);
}
// prepare user request payload
if (params.body && typeof params.body === 'object') {
paramsCopy.body = JSON.stringify(params.body);
}
// prepare payload as base64
if (paramsCopy.body) {
paramsCopy.body = FetchExecutor.b64EncodeUnicode(paramsCopy.body);
}
if (additionalParams) {
for (let i in additionalParams) {
if (additionalParams.hasOwnProperty(i)) {
paramsCopy[i] = additionalParams[i];
}
}
}
// paramsCopy['auth_token'] = ''; // TODO auth token from controller
// prepare fetch configuration
let fetchParams = {
method: 'POST',
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cache': 'no-cache'
},
body: JSON.stringify(paramsCopy)
};
// status of request
let status: number = null;
const proxyServerUrl = proxyUrl ? proxyUrl : FetchRequest.PROXY_SERVER_URL;
// do it!, post our request to proxy
const ret = fetchFunction(proxyServerUrl, fetchParams).then((response: Response) => {
status = response.status;
return <ResponseDef><any>response.json();
}).then((response: ResponseDef) => {
// request to proxy failed
if (response.error_type && response.error_message) {
throw new ProxyCommunicationError(response.error_type, response.error_message);
}
// checking status between proxy and service
if (status === ResponseStatus.OK) {
return response;
} else {
throw new ProxyCommunicationError('unknown', 'Internal request failed (status: ' + status + ')');
}
}).then((response: ResponseDef) => {
// request from proxy to external server failed
if (response.error_type && response.error_message) {
throw new FetchError(response.error_code, response.error_message);
}
// is there body in response?
let body = null;
if (response.body) {
body = FetchExecutor.b64DecodeUnicode(response.body);
}
// is there headers in response?
let headers = null;
if (response.headers) {
headers = FetchExecutor.headersToLower(response.headers);
}
return new FetchResponse(response.status_code, headers, body);
});
return new PromiseWraper<FetchResponse>(ret, new PromiseChainScope(machine));
}
}
/**
*
 * Request object - holder of the necessary information
*
*/
export class FetchRequest implements RequestDef {
public url: string;
public method: string;
public headers: {[key: string]: string};
public body?: any;
public timeout?: number;
public max_redirects?: number;
public reject_unauthorized?: boolean;
public follow_redirect?: boolean;
public auth_user?: string;
public auth_pass?: string;
/**
*
* Our byzance proxy url
*
*/
static PROXY_SERVER_URL = 'http://127.0.0.1:4000/fetch/'; // 'http://192.168.65.30:3000/fetch/';//'https://someproxyshit.biz/fetch-proxy';
public constructor(method: RequestMethod, url: string, body: any = null) {
this.method = method;
this.url = url;
this.body = body;
}
}
/**
*
* Response object
*
*/
export class FetchResponse {
private _status: number;
private _headers: {[key: string]: string};
private _body: any;
constructor(status: number, headers: {[key: string]: string}, body: any) {
this._status = status;
this._headers = headers;
this._body = body;
}
public get status(): number {
return this._status;
}
public get headers(): {[key: string]: string} {
return this._headers;
}
public get body(): any {
return this._body;
}
}
/**
*
* Requests with defined methods
*
*/
export class GetRequest extends FetchRequest {
public constructor(url: string) {
super('GET', url);
}
}
export class PostRequest extends FetchRequest {
public constructor(url: string, body: any = null) {
super('POST', url, body);
}
}
export class PutRequest extends FetchRequest {
public constructor(url: string, body: any = null) {
super('PUT', url, body);
}
}
export class DeleteRequest extends FetchRequest {
public constructor(url: string) {
super('DELETE', url);
}
}
export class | HeadRequest | identifier_name |
|
Fetch.ts | this._promise = promise;
this._chainScope = chainScope;
if (!this._chainScope) {
this._chainScope = new PromiseChainScope();
}
}
public then<PromiseResult>(fnc: (value: T) => PromiseResult): PromiseWraper<PromiseResult> {
// push to chain scope;
this._chainScope.chain.push('then');
return new PromiseWraper(this._promise.then( (value: T): PromiseResult => {
// shift from chain scope
this._chainScope.setTo('then');
this._chainScope.chain.shift();
try {
return fnc(value);
} catch (e) {
// go to next catch in chain, if exists and then throw error
if (this._chainScope.setTo('catch')) {
throw e;
} else {
// If no next catch exists, throw error to our safe scope
this._chainScope.callError(e);
}
}
}), this._chainScope);
}
public catch<PromiseResult>(fnc: (e: Error) => PromiseResult): PromiseWraper<T | PromiseResult> {
// push to chain scope;
this._chainScope.chain.push('catch');
return new PromiseWraper(this._promise.catch( (e: Error): PromiseResult => {
// pop from chain scope
this._chainScope.setTo('catch');
this._chainScope.chain.shift();
try {
return fnc(e);
} catch (e) {
// go to next catch in chain, if exists and then throw error
if (this._chainScope.setTo('catch')) {
throw e;
} else {
// If no next catch exists, throw error to our safe scope
this._chainScope.callError(e);
}
}
}), this._chainScope);
}
}
/**
*
* Constants
*
*/
export type RequestMethod = 'GET' | 'POST' | 'PUT' | 'DELETE' | 'OPTIONS' | 'HEAD';
export enum ResponseStatus {
CONTINUE = 100,
SWITCHING = 101,
PROCESSING = 102,
OK = 200,
CREATED = 201,
ACCEPTED = 202,
NON_AUTHORITATIVE = 203,
NO_CONTENT = 204,
RESET_CONTENT = 205,
PARTIAL_CONTENT = 206,
MULTI_STATUS = 207,
MULTIPLE_CHOISES = 300,
MOVED_PERMANENTLY = 301,
FOUND = 302,
SEE_OTHER = 303,
NOT_MODIFIED = 304,
USE_PROXY = 305,
SWITCH_PROXY = 306,
TEMPORARY_REDIRECT = 307,
BAD_REQUEST = 400,
UNAUTHORIZED = 401,
PAYMENT_REQUIRED = 402,
FORBIDDEN = 403,
NOT_FOUND = 404,
METHOD_NOT_ALLOWED = 405,
NOT_ACCEPTABLE = 406,
AUTHENTICATION_REQUIRED = 407,
REQUEST_TIMEOUT = 408,
CONFLICT = 409,
GONE = 410,
LENGTH_REQUIRED = 411,
PRECONDITION_FAILED = 412,
ENTITY_TOO_LARGE = 413,
URI_TOO_LONG = 414,
UNSOPPORTED_MEDIA_TYPE = 415,
REQUEST_RANGE_NOT_SATISFIABLE = 416,
EXPECTATION_FAILED = 417,
IAM_TEAPOD = 418,
UNPROCESSABLE_ENTITY = 422,
LOCKED = 423,
FAILED_DEPENDENCY = 424,
UNORDERED_COLLECTION = 425,
UPGRADE_REQUIRED = 426,
RETRY_WITH = 449,
BLOCKED_BY_WINDOWS_PARENTAL_CONTROLS = 450,
UNAVAILABLE_FOR_LEGAL_REASON = 451,
CLIENT_CLOSED_REQUEST = 499,
INTERNAL_SERVER_ERROR = 500,
NOT_IMPLEMENTED = 501,
BAD_GATEWAY = 502,
SERVICE_UNAVAILABLE = 503,
GATEWAY_TIMEOUT = 504,
HTTP_VERSION_NOT_SUPPORTED = 505,
VARIANT_ALSO_NEGOTIATES = 506,
INSUFFIACIENT_STORAGE = 507,
BANDWIDTH_LIMIT_EXCEEDED = 509,
NOT_EXTENDED = 510
}
/**
*
* Definition of protocol for communication between this service and proxy
*
*/
export const RequestDefKeys = ['url', 'method', 'headers', 'body', 'timeout', 'max_redirects', 'reject_unauthorized', 'follow_redirect', 'auth_user', 'auth_pass'];
export interface RequestDef {
url: string;
method: string;
headers: {[key: string]: string};
body?: any;
timeout?: number;
max_redirects?: number;
reject_unauthorized?: boolean;
follow_redirect?: boolean;
auth_user?: string;
auth_pass?: string;
};
export interface ResponseDef {
status_code: number;
headers: {[key: string]: string};
body: any;
error_type?: string;
error_code?: string;
error_message?: string;
};
/**
*
* Error in communication with proxy
*
*/
export class ProxyCommunicationError extends Error {
message: string;
type: string;
constructor(type: string, message: string) {
super(message);
this.name = 'ProxyCommunicationError';
this.message = message;
this.type = type;
(<any>this).__proto__ = ProxyCommunicationError.prototype;
}
}
/**
*
* Error in communication with target server
*
*/
export class FetchError extends Error {
message: string;
code: string;
constructor(code: string, message: string) {
super(message);
this.name = 'FetchError';
this.message = message;
this.code = code;
(<any>this).__proto__ = ProxyCommunicationError.prototype;
}
}
/**
*
* Static helper for executing fetch request to our proxy server
*
*/
export class FetchExecutor {
/**
*
* Utilities
*
*/
public static b64EncodeUnicode(str: string) {
return btoaFunction(encodeURIComponent(str).replace(/%([0-9A-F]{2})/g, function(match, p1) {
return String.fromCharCode(<any>('0x' + p1)); | return decodeURIComponent(atobFunction(str).split('').map(function(c) {
return '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2);
}).join(''));
}
public static copyRequestDef(input: RequestDef, defaultHeaders?: {[key: string]: string}): RequestDef {
let def: RequestDef = <any>{};
for (let i = 0; i < RequestDefKeys.length; i++) {
if (input[RequestDefKeys[i]]) {
def[RequestDefKeys[i]] = input[RequestDefKeys[i]];
}
}
// prepare headers
if (!def.headers) {
if (defaultHeaders) {
def.headers = defaultHeaders;
} else {
def.headers = {};
}
}
return def;
}
public static headersToLower(headers: {[key: string]: string}): {[key: string]: string} {
let headersCopy = {};
for (let i in headers) {
if (headers.hasOwnProperty(i)) {
headersCopy[i.toLowerCase()] = headers[i];
}
}
return headersCopy;
}
/**
*
     * Fetch request that will be sent to the proxy server
*
     * It will be parsed from JSON and base64 and you will get a promise that looks like a normal fetch from the browser,
     * but it's not! You will get a FetchResponse object, which is not the same as the standard Response object.
*
*/
static fetch(machine: Machine, params: RequestDef, additionalParams?: {[key: string]: string}, proxyUrl?: string): PromiseWraper<FetchResponse> {
let paramsCopy = FetchExecutor.copyRequestDef(params);
// prepare headers
if (paramsCopy.headers) {
paramsCopy.headers = FetchExecutor.headersToLower(paramsCopy.headers);
}
// prepare user request payload
if (params.body && typeof params.body === 'object') {
paramsCopy.body = JSON.stringify(params.body);
}
// prepare payload as base64
if (paramsCopy.body) {
paramsCopy.body = FetchExecutor.b64EncodeUnicode(paramsCopy.body);
}
if (additionalParams) {
for (let i in additionalParams) {
if (additionalParams.hasOwnProperty(i)) {
paramsCopy[i] = additionalParams[i];
}
}
}
// paramsCopy['auth_token'] = ''; // TODO auth token from controller
// prepare fetch configuration
let fetchParams = {
method: 'POST',
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cache': 'no-cache'
},
body: JSON.stringify(paramsCopy)
};
| }));
}
public static b64DecodeUnicode(str) { | random_line_split |
grasp_pos.py | rgb_stream.close()
self.rgb_to_world_rmat = self.rgb_rmat.T
self.rgb_to_world_tvec = -np.dot(self.rgb_rmat.T, self.rgb_tvec)
self.ir_to_world_rmat = self.ir_rmat.T
self.ir_to_world_tvec = -np.dot(self.ir_rmat.T, self.ir_tvec)
def load_intrinsics(self):
depth_stream = open("/home/chentao/kinect_calibration/depth_0000000000000000.yaml", "r")
depth_doc = yaml.load(depth_stream)
self.depth_mtx = np.array(depth_doc['camera_matrix']['data']).reshape(3,3)
self.depth_dist = np.array(depth_doc['distortion_coefficients']['data'])
depth_stream.close()
rgb_stream = open("/home/chentao/kinect_calibration/rgb_0000000000000000.yaml", "r")
rgb_doc = yaml.load(rgb_stream)
self.rgb_mtx = np.array(rgb_doc['camera_matrix']['data']).reshape(3,3)
self.rgb_dist = np.array(rgb_doc['distortion_coefficients']['data'])
rgb_stream.close()
def img_to_world(self, pix_point):
        if self.depth_image is None or self.rgb_image is None:
return
# pix_point is (u,v) : the coordinates on the image
depth_pix_point = np.array([pix_point[0], pix_point[1], 1]) * self.depth_image[pix_point[1], pix_point[0]]
depth_coord_point = np.dot(np.linalg.inv(self.rgb_mtx), depth_pix_point.reshape(-1,1))
point_in_world = np.dot(self.rgb_to_world_rmat, depth_coord_point.reshape(-1,1)) + self.rgb_to_world_tvec
point_in_world[0] += x_offset
point_in_world[1] += y_offset
point_in_world[2] += z_offset
return point_in_world
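    # --- illustrative sketch (not part of the original script) ------------
    # Rough usage check of the back-projection above: the pixel is scaled by
    # its depth, passed through the inverse RGB intrinsics and then through
    # the camera-to-world rotation/translation.  The pixel chosen below is
    # arbitrary and assumes the calibration files and images are available.
    def _demo_img_to_world(self):
        centre_pixel = np.array([320, 240])    # (u, v), assumed image centre
        point = self.img_to_world(centre_pixel)
        if point is not None:
            print("world coordinates [m]:", point.ravel())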
def get_center_point(self):
if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
pix_point = np.zeros(2)
pix_point[0] = (self.ix1 + self.ix2) / 2
pix_point[1] = (self.iy1 + self.iy2) / 2
# print "center point in image: ",pix_point
return pix_point
def get_orientation(self):
if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
if self.ix1 > self.ix2:
temp = self.ix2
self.ix2 = self.ix1
self.ix1 = temp
if self.iy1 > self.iy2:
temp = self.iy2
self.iy2 = self.iy1
self.iy1 = temp
roi_width = self.ix2 - self.ix1 + 1
roi_height = self.iy2 - self.iy1 + 1
roi = self.rgb_image[self.iy1:self.iy2 + 1, self.ix1:self.ix2 + 1, :].reshape(-1,3).astype(float)
roi = preprocessing.scale(roi)
# # KMeans
kmeans = KMeans(n_clusters=2)
kmeans.fit(roi)
y = kmeans.predict(roi)
y = y.reshape(roi_height, roi_width)
# Find the mode of the cluster index in the boundary, assume that the mode represent the cluster of background
wid = 10
t = np.append(y[:,:wid].reshape(-1,1),y[:,y.shape[1] - wid:].reshape(-1,1))
t = np.append(t, y[y.shape[0] - wid:,wid:y.shape[1] - wid].reshape(-1,1))
t = np.append(t, y[0:wid, wid:y.shape[1] - wid].reshape(-1,1))
        # since the cluster index can only be 0 or 1 here, so if the background is 0, then our target is 1, and vice versa.
interested_cluster = 1 - stats.mode(t)[0][0]
interested_cluster_indices = np.array(np.where(y == interested_cluster))
interested_cluster_indices[0] += self.iy1
interested_cluster_indices[1] += self.ix1
tempimg = self.rgb_image.copy()
tempimg[interested_cluster_indices[0], interested_cluster_indices[1],:] = np.zeros((1,3))
# Grab Cut
# mask = np.zeros(self.rgb_image.shape[:2],np.uint8)
# bgdModel = np.zeros((1,65),np.float64)
# fgdModel = np.zeros((1,65),np.float64)
# ix1 = max(self.ix1,1)
# iy1 = max(self.iy1,1)
# ix2 = max(self.ix2,1)
# iy2 = max(self.iy2,1)
# ix1 = min(self.ix1,self.rgb_image.shape[1])
# iy1 = min(self.iy1,self.rgb_image.shape[0])
# ix2 = min(self.ix2,self.rgb_image.shape[1])
# iy2 = min(self.iy2,self.rgb_image.shape[0])
# # print "ix1: ",ix1
# # print "iy1: ",iy1
# # print "ix2: ",ix2
# # print "iy2: ",iy2
# rect = (ix1,iy1,ix2,iy2)
# print "Grab Cut Started..."
# cv2.grabCut(self.rgb_image,mask,rect,bgdModel,fgdModel,10,cv2.GC_INIT_WITH_RECT)
# print "Grab Cut done..."
# # all 0-pixels and 2-pixels are put to 0 (ie background) and all 1-pixels and 3-pixels are put to 1(ie foreground pixels)
# mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
# interested_cluster_indices = np.where(mask2 == 1)
# tempimg = self.rgb_image*mask2[:,:,np.newaxis]
cv2.imshow("Segmentation",tempimg)
cv2.waitKey(5)
roi_points = []
for i,j in zip(interested_cluster_indices[0], interested_cluster_indices[1]):
pix_point =np.zeros(2)
pix_point[0] = j
pix_point[1] = i
point_temp = self.img_to_world(pix_point)
if np.isnan(point_temp).sum() == 0:
roi_points.append(point_temp)
roi_points = np.array(roi_points).reshape(-1,3)
# Remove the possible outliers
# roi_points = roi_points[roi_points[:,2] > np.percentile(roi_points[:,2],25) and roi_points[:,2] < np.percentile(roi_points[:,2],75)]
# print roi_points.shape
roi_points = roi_points[roi_points[:,2] > np.percentile(roi_points[:,2],25)]
# print roi_points.shape
roi_points = roi_points[roi_points[:,2] < np.percentile(roi_points[:,2],50 / 75.0 * 100)]
# print roi_points.shape
# Find Normal Vector, use a plane to fit these data
y = roi_points[:,2]
X = roi_points[:,:2]
model = linear_model.LinearRegression()
model.fit(X, y)
normal_vector = np.zeros(3)
normal_vector[0] = -model.coef_[0]
normal_vector[1] = -model.coef_[1]
normal_vector[2] = 1
cos_alpha = np.zeros(3)
alpha = np.zeros(3)
for i in range(3):
cos_alpha[i] = normal_vector[i] / np.linalg.norm(normal_vector)
alpha = np.arccos(cos_alpha)
# # print "normal vector:",normal_vector
# # print "cos_alpha:",cos_alpha
# # print "alpha:",alpha
# # Find the radius for the cylinder
# # https://scipy.github.io/old-wiki/pages/Cookbook/Least_Squares_Circle.html
# self.points_x = X[:,0]
# self.points_y = X[:,1]
# x_m = np.mean(self.points_x)
# y_m = np.mean(self.points_y)
# center_estimate = np.array([x_m, y_m])
# center, ier = optimize.leastsq(self.circle_fit_cost, center_estimate)
# R = np.sqrt((self.points_x - center[0]) ** 2 + (self.points_y - center[1]) ** 2).mean()
# return alpha, R
return alpha, normal_vector
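    # --- illustrative sketch (not part of the original script) ------------
    # The orientation above comes from fitting the plane z = a*x + b*y + c to
    # the segmented points, with surface normal (-a, -b, 1).  This synthetic,
    # noise-free check is illustrative only and is not used by the node.
    def _demo_plane_normal(self):
        from sklearn import linear_model
        xy = np.random.rand(200, 2)                      # (x, y) samples
        z = 0.2 * xy[:, 0] - 0.1 * xy[:, 1] + 0.5        # tilted plane
        model = linear_model.LinearRegression().fit(xy, z)
        normal = np.array([-model.coef_[0], -model.coef_[1], 1.0])
        print("recovered unit normal:", normal / np.linalg.norm(normal))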
def circle_fit_cost(self, c):
Ri = np.sqrt((self.points_x - c[0]) ** 2 + (self.points_y - c[1]) ** 2)
return Ri - Ri.mean()
def | draw_rect | identifier_name |
|
grasp_pos.py | 1
cv2.namedWindow('RGB Image')
cv2.setMouseCallback('RGB Image',self.draw_rect)
def depth_callback(self,data):
try:
self.depth_image= self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
except CvBridgeError as e:
print(e)
# print "depth"
depth_min = np.nanmin(self.depth_image)
depth_max = np.nanmax(self.depth_image)
depth_img = self.depth_image.copy()
depth_img[np.isnan(self.depth_image)] = depth_min
depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
cv2.imshow("Depth Image", depth_img)
cv2.waitKey(5)
def rgb_callback(self,data):
try:
self.rgb_image = self.br.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
tempimg = self.rgb_image.copy()
if self.drawing or self.rect_done:
if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
cv2.rectangle(tempimg,(self.ix1,self.iy1),(self.ix2,self.iy2),(0,255,0),2)
if self.rect_done:
center_point = self.get_center_point()
cv2.circle(tempimg, tuple(center_point.astype(int)), 3, (0,0,255),-1)
cv2.imshow('RGB Image', tempimg)
cv2.waitKey(5)
# print "rgb"
def load_extrinsics(self):
ir_stream = open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "r")
ir_doc = yaml.load(ir_stream)
self.ir_rmat = np.array(ir_doc['rmat']).reshape(3,3)
self.ir_tvec = np.array(ir_doc['tvec'])
ir_stream.close()
rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "r")
rgb_doc = yaml.load(rgb_stream)
self.rgb_rmat = np.array(rgb_doc['rmat']).reshape(3,3)
self.rgb_tvec = np.array(rgb_doc['tvec'])
rgb_stream.close()
self.rgb_to_world_rmat = self.rgb_rmat.T
self.rgb_to_world_tvec = -np.dot(self.rgb_rmat.T, self.rgb_tvec)
self.ir_to_world_rmat = self.ir_rmat.T
self.ir_to_world_tvec = -np.dot(self.ir_rmat.T, self.ir_tvec)
def load_intrinsics(self):
|
depth_doc = yaml.load(depth_stream)
self.depth_mtx = np.array(depth_doc['camera_matrix']['data']).reshape(3,3)
self.depth_dist = np.array(depth_doc['distortion_coefficients']['data'])
depth_stream.close()
rgb_stream = open("/home/chentao/kinect_calibration/rgb_0000000000000000.yaml", "r")
rgb_doc = yaml.load(rgb_stream)
self.rgb_mtx = np.array(rgb_doc['camera_matrix']['data']).reshape(3,3)
self.rgb_dist = np.array(rgb_doc['distortion_coefficients']['data'])
rgb_stream.close()
def img_to_world(self, pix_point):
        if self.depth_image is None or self.rgb_image is None:
return
# pix_point is (u,v) : the coordinates on the image
depth_pix_point = np.array([pix_point[0], pix_point[1], 1]) * self.depth_image[pix_point[1], pix_point[0]]
depth_coord_point = np.dot(np.linalg.inv(self.rgb_mtx), depth_pix_point.reshape(-1,1))
point_in_world = np.dot(self.rgb_to_world_rmat, depth_coord_point.reshape(-1,1)) + self.rgb_to_world_tvec
point_in_world[0] += x_offset
point_in_world[1] += y_offset
point_in_world[2] += z_offset
return point_in_world
def get_center_point(self):
if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
pix_point = np.zeros(2)
pix_point[0] = (self.ix1 + self.ix2) / 2
pix_point[1] = (self.iy1 + self.iy2) / 2
# print "center point in image: ",pix_point
return pix_point
def get_orientation(self):
if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
if self.ix1 > self.ix2:
temp = self.ix2
self.ix2 = self.ix1
self.ix1 = temp
if self.iy1 > self.iy2:
temp = self.iy2
self.iy2 = self.iy1
self.iy1 = temp
roi_width = self.ix2 - self.ix1 + 1
roi_height = self.iy2 - self.iy1 + 1
roi = self.rgb_image[self.iy1:self.iy2 + 1, self.ix1:self.ix2 + 1, :].reshape(-1,3).astype(float)
roi = preprocessing.scale(roi)
# # KMeans
kmeans = KMeans(n_clusters=2)
kmeans.fit(roi)
y = kmeans.predict(roi)
y = y.reshape(roi_height, roi_width)
# Find the mode of the cluster index in the boundary, assume that the mode represent the cluster of background
wid = 10
t = np.append(y[:,:wid].reshape(-1,1),y[:,y.shape[1] - wid:].reshape(-1,1))
t = np.append(t, y[y.shape[0] - wid:,wid:y.shape[1] - wid].reshape(-1,1))
t = np.append(t, y[0:wid, wid:y.shape[1] - wid].reshape(-1,1))
        # since the cluster index can only be 0 or 1 here, so if the background is 0, then our target is 1, and vice versa.
interested_cluster = 1 - stats.mode(t)[0][0]
interested_cluster_indices = np.array(np.where(y == interested_cluster))
interested_cluster_indices[0] += self.iy1
interested_cluster_indices[1] += self.ix1
tempimg = self.rgb_image.copy()
tempimg[interested_cluster_indices[0], interested_cluster_indices[1],:] = np.zeros((1,3))
# Grab Cut
# mask = np.zeros(self.rgb_image.shape[:2],np.uint8)
# bgdModel = np.zeros((1,65),np.float64)
# fgdModel = np.zeros((1,65),np.float64)
# ix1 = max(self.ix1,1)
# iy1 = max(self.iy1,1)
# ix2 = max(self.ix2,1)
# iy2 = max(self.iy2,1)
# ix1 = min(self.ix1,self.rgb_image.shape[1])
# iy1 = min(self.iy1,self.rgb_image.shape[0])
# ix2 = min(self.ix2,self.rgb_image.shape[1])
# iy2 = min(self.iy2,self.rgb_image.shape[0])
# # print "ix1: ",ix1
# # print "iy1: ",iy1
# # print "ix2: ",ix2
# # print "iy2: ",iy2
# rect = (ix1,iy1,ix2,iy2)
# print "Grab Cut Started..."
# cv2.grabCut(self.rgb_image,mask,rect,bgdModel,fgdModel,10,cv2.GC_INIT_WITH_RECT)
# print "Grab Cut done..."
# # all 0-pixels and 2-pixels are put to 0 (ie background) and all 1-pixels and 3-pixels are put to 1(ie foreground pixels)
# mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
# interested_cluster_indices = np.where(mask2 == 1)
# tempimg = self.rgb_image*mask2[:,:,np.newaxis]
cv2.imshow("Segmentation",tempimg)
cv2.waitKey(5)
roi_points = []
for i,j in zip(interested_cluster_indices[0], interested_cluster_indices[1]):
pix_point =np.zeros(2)
pix_point[0] = j
pix_point[1] = i
point_temp = self.img_to_world(pix_point)
if np.isnan(point_temp).sum() == 0:
roi_points.append(point_temp)
roi_points = np.array(roi_points).reshape(-1,3)
# Remove the possible outliers
# roi_points = roi_points | depth_stream = open("/home/chentao/kinect_calibration/depth_0000000000000000.yaml", "r") | identifier_body |
grasp_pos.py | 1
cv2.namedWindow('RGB Image')
cv2.setMouseCallback('RGB Image',self.draw_rect)
def depth_callback(self,data):
try:
self.depth_image= self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
except CvBridgeError as e:
print(e)
# print "depth"
depth_min = np.nanmin(self.depth_image)
depth_max = np.nanmax(self.depth_image)
depth_img = self.depth_image.copy()
depth_img[np.isnan(self.depth_image)] = depth_min
depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
cv2.imshow("Depth Image", depth_img)
cv2.waitKey(5)
def rgb_callback(self,data):
try:
self.rgb_image = self.br.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
tempimg = self.rgb_image.copy()
if self.drawing or self.rect_done:
if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
cv2.rectangle(tempimg,(self.ix1,self.iy1),(self.ix2,self.iy2),(0,255,0),2)
if self.rect_done:
center_point = self.get_center_point()
cv2.circle(tempimg, tuple(center_point.astype(int)), 3, (0,0,255),-1)
cv2.imshow('RGB Image', tempimg)
cv2.waitKey(5)
# print "rgb"
def load_extrinsics(self):
ir_stream = open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "r")
ir_doc = yaml.load(ir_stream)
self.ir_rmat = np.array(ir_doc['rmat']).reshape(3,3)
self.ir_tvec = np.array(ir_doc['tvec'])
ir_stream.close()
rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "r")
rgb_doc = yaml.load(rgb_stream)
self.rgb_rmat = np.array(rgb_doc['rmat']).reshape(3,3)
self.rgb_tvec = np.array(rgb_doc['tvec'])
rgb_stream.close()
self.rgb_to_world_rmat = self.rgb_rmat.T
self.rgb_to_world_tvec = -np.dot(self.rgb_rmat.T, self.rgb_tvec)
self.ir_to_world_rmat = self.ir_rmat.T
self.ir_to_world_tvec = -np.dot(self.ir_rmat.T, self.ir_tvec)
def load_intrinsics(self):
depth_stream = open("/home/chentao/kinect_calibration/depth_0000000000000000.yaml", "r")
depth_doc = yaml.load(depth_stream)
self.depth_mtx = np.array(depth_doc['camera_matrix']['data']).reshape(3,3)
self.depth_dist = np.array(depth_doc['distortion_coefficients']['data'])
depth_stream.close()
rgb_stream = open("/home/chentao/kinect_calibration/rgb_0000000000000000.yaml", "r")
rgb_doc = yaml.load(rgb_stream)
self.rgb_mtx = np.array(rgb_doc['camera_matrix']['data']).reshape(3,3)
self.rgb_dist = np.array(rgb_doc['distortion_coefficients']['data'])
rgb_stream.close()
def img_to_world(self, pix_point):
        if self.depth_image is None or self.rgb_image is None:
return
# pix_point is (u,v) : the coordinates on the image
depth_pix_point = np.array([pix_point[0], pix_point[1], 1]) * self.depth_image[pix_point[1], pix_point[0]]
depth_coord_point = np.dot(np.linalg.inv(self.rgb_mtx), depth_pix_point.reshape(-1,1))
point_in_world = np.dot(self.rgb_to_world_rmat, depth_coord_point.reshape(-1,1)) + self.rgb_to_world_tvec
point_in_world[0] += x_offset
point_in_world[1] += y_offset
point_in_world[2] += z_offset
return point_in_world
def get_center_point(self):
if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
pix_point = np.zeros(2)
pix_point[0] = (self.ix1 + self.ix2) / 2
pix_point[1] = (self.iy1 + self.iy2) / 2
# print "center point in image: ",pix_point
return pix_point
def get_orientation(self):
if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
if self.ix1 > self.ix2:
temp = self.ix2
self.ix2 = self.ix1
self.ix1 = temp
if self.iy1 > self.iy2:
temp = self.iy2
self.iy2 = self.iy1
self.iy1 = temp
roi_width = self.ix2 - self.ix1 + 1
roi_height = self.iy2 - self.iy1 + 1
roi = self.rgb_image[self.iy1:self.iy2 + 1, self.ix1:self.ix2 + 1, :].reshape(-1,3).astype(float)
roi = preprocessing.scale(roi)
# # KMeans
kmeans = KMeans(n_clusters=2)
kmeans.fit(roi)
y = kmeans.predict(roi)
y = y.reshape(roi_height, roi_width)
# Find the mode of the cluster index in the boundary, assume that the mode represent the cluster of background
wid = 10
t = np.append(y[:,:wid].reshape(-1,1),y[:,y.shape[1] - wid:].reshape(-1,1))
t = np.append(t, y[y.shape[0] - wid:,wid:y.shape[1] - wid].reshape(-1,1))
t = np.append(t, y[0:wid, wid:y.shape[1] - wid].reshape(-1,1))
        # since the cluster index can only be 0 or 1 here, so if the background is 0, then our target is 1, and vice versa.
interested_cluster = 1 - stats.mode(t)[0][0]
interested_cluster_indices = np.array(np.where(y == interested_cluster))
interested_cluster_indices[0] += self.iy1
interested_cluster_indices[1] += self.ix1
tempimg = self.rgb_image.copy()
tempimg[interested_cluster_indices[0], interested_cluster_indices[1],:] = np.zeros((1,3))
# Grab Cut
# mask = np.zeros(self.rgb_image.shape[:2],np.uint8)
# bgdModel = np.zeros((1,65),np.float64)
# fgdModel = np.zeros((1,65),np.float64)
# ix1 = max(self.ix1,1)
# iy1 = max(self.iy1,1)
# ix2 = max(self.ix2,1)
# iy2 = max(self.iy2,1)
# ix1 = min(self.ix1,self.rgb_image.shape[1])
# iy1 = min(self.iy1,self.rgb_image.shape[0])
# ix2 = min(self.ix2,self.rgb_image.shape[1])
# iy2 = min(self.iy2,self.rgb_image.shape[0])
# # print "ix1: ",ix1
# # print "iy1: ",iy1
# # print "ix2: ",ix2
# # print "iy2: ",iy2
# rect = (ix1,iy1,ix2,iy2)
# print "Grab Cut Started..."
# cv2.grabCut(self.rgb_image,mask,rect,bgdModel,fgdModel,10,cv2.GC_INIT_WITH_RECT)
# print "Grab Cut done..."
# # all 0-pixels and 2-pixels are put to 0 (ie background) and all 1-pixels and 3-pixels are put to 1(ie foreground pixels) | cv2.imshow("Segmentation",tempimg)
cv2.waitKey(5)
roi_points = []
for i,j in zip(interested_cluster_indices[0], interested_cluster_indices[1]):
pix_point =np.zeros(2)
pix_point[0] = j
pix_point[1] = i
point_temp = self.img_to_world(pix_point)
if np.isnan(point_temp).sum() == 0:
roi_points.append(point_temp)
roi_points = np.array(roi_points).reshape(-1,3)
# Remove the possible outliers
# roi_points = roi_points | # mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
# interested_cluster_indices = np.where(mask2 == 1)
# tempimg = self.rgb_image*mask2[:,:,np.newaxis]
| random_line_split |
grasp_pos.py | 1
cv2.namedWindow('RGB Image')
cv2.setMouseCallback('RGB Image',self.draw_rect)
def depth_callback(self,data):
try:
self.depth_image= self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
except CvBridgeError as e:
print(e)
# print "depth"
depth_min = np.nanmin(self.depth_image)
depth_max = np.nanmax(self.depth_image)
depth_img = self.depth_image.copy()
depth_img[np.isnan(self.depth_image)] = depth_min
depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
cv2.imshow("Depth Image", depth_img)
cv2.waitKey(5)
def rgb_callback(self,data):
try:
self.rgb_image = self.br.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
tempimg = self.rgb_image.copy()
if self.drawing or self.rect_done:
|
cv2.imshow('RGB Image', tempimg)
cv2.waitKey(5)
# print "rgb"
def load_extrinsics(self):
ir_stream = open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "r")
ir_doc = yaml.load(ir_stream)
self.ir_rmat = np.array(ir_doc['rmat']).reshape(3,3)
self.ir_tvec = np.array(ir_doc['tvec'])
ir_stream.close()
rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "r")
rgb_doc = yaml.load(rgb_stream)
self.rgb_rmat = np.array(rgb_doc['rmat']).reshape(3,3)
self.rgb_tvec = np.array(rgb_doc['tvec'])
rgb_stream.close()
self.rgb_to_world_rmat = self.rgb_rmat.T
self.rgb_to_world_tvec = -np.dot(self.rgb_rmat.T, self.rgb_tvec)
self.ir_to_world_rmat = self.ir_rmat.T
self.ir_to_world_tvec = -np.dot(self.ir_rmat.T, self.ir_tvec)
def load_intrinsics(self):
depth_stream = open("/home/chentao/kinect_calibration/depth_0000000000000000.yaml", "r")
depth_doc = yaml.load(depth_stream)
self.depth_mtx = np.array(depth_doc['camera_matrix']['data']).reshape(3,3)
self.depth_dist = np.array(depth_doc['distortion_coefficients']['data'])
depth_stream.close()
rgb_stream = open("/home/chentao/kinect_calibration/rgb_0000000000000000.yaml", "r")
rgb_doc = yaml.load(rgb_stream)
self.rgb_mtx = np.array(rgb_doc['camera_matrix']['data']).reshape(3,3)
self.rgb_dist = np.array(rgb_doc['distortion_coefficients']['data'])
rgb_stream.close()
def img_to_world(self, pix_point):
        if self.depth_image is None or self.rgb_image is None:
return
# pix_point is (u,v) : the coordinates on the image
depth_pix_point = np.array([pix_point[0], pix_point[1], 1]) * self.depth_image[pix_point[1], pix_point[0]]
depth_coord_point = np.dot(np.linalg.inv(self.rgb_mtx), depth_pix_point.reshape(-1,1))
point_in_world = np.dot(self.rgb_to_world_rmat, depth_coord_point.reshape(-1,1)) + self.rgb_to_world_tvec
point_in_world[0] += x_offset
point_in_world[1] += y_offset
point_in_world[2] += z_offset
return point_in_world
def get_center_point(self):
if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
pix_point = np.zeros(2)
pix_point[0] = (self.ix1 + self.ix2) / 2
pix_point[1] = (self.iy1 + self.iy2) / 2
# print "center point in image: ",pix_point
return pix_point
def get_orientation(self):
if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
if self.ix1 > self.ix2:
temp = self.ix2
self.ix2 = self.ix1
self.ix1 = temp
if self.iy1 > self.iy2:
temp = self.iy2
self.iy2 = self.iy1
self.iy1 = temp
roi_width = self.ix2 - self.ix1 + 1
roi_height = self.iy2 - self.iy1 + 1
roi = self.rgb_image[self.iy1:self.iy2 + 1, self.ix1:self.ix2 + 1, :].reshape(-1,3).astype(float)
roi = preprocessing.scale(roi)
# # KMeans
kmeans = KMeans(n_clusters=2)
kmeans.fit(roi)
y = kmeans.predict(roi)
y = y.reshape(roi_height, roi_width)
            # Find the mode of the cluster index along the ROI boundary; assume the mode represents the background cluster
wid = 10
t = np.append(y[:,:wid].reshape(-1,1),y[:,y.shape[1] - wid:].reshape(-1,1))
t = np.append(t, y[y.shape[0] - wid:,wid:y.shape[1] - wid].reshape(-1,1))
t = np.append(t, y[0:wid, wid:y.shape[1] - wid].reshape(-1,1))
            # since the cluster index can only be 0 or 1 here, if the background is 0 then our target is 1, and vice versa.
interested_cluster = 1 - stats.mode(t)[0][0]
interested_cluster_indices = np.array(np.where(y == interested_cluster))
interested_cluster_indices[0] += self.iy1
interested_cluster_indices[1] += self.ix1
tempimg = self.rgb_image.copy()
tempimg[interested_cluster_indices[0], interested_cluster_indices[1],:] = np.zeros((1,3))
# Grab Cut
# mask = np.zeros(self.rgb_image.shape[:2],np.uint8)
# bgdModel = np.zeros((1,65),np.float64)
# fgdModel = np.zeros((1,65),np.float64)
# ix1 = max(self.ix1,1)
# iy1 = max(self.iy1,1)
# ix2 = max(self.ix2,1)
# iy2 = max(self.iy2,1)
# ix1 = min(self.ix1,self.rgb_image.shape[1])
# iy1 = min(self.iy1,self.rgb_image.shape[0])
# ix2 = min(self.ix2,self.rgb_image.shape[1])
# iy2 = min(self.iy2,self.rgb_image.shape[0])
# # print "ix1: ",ix1
# # print "iy1: ",iy1
# # print "ix2: ",ix2
# # print "iy2: ",iy2
# rect = (ix1,iy1,ix2,iy2)
# print "Grab Cut Started..."
# cv2.grabCut(self.rgb_image,mask,rect,bgdModel,fgdModel,10,cv2.GC_INIT_WITH_RECT)
# print "Grab Cut done..."
# # all 0-pixels and 2-pixels are put to 0 (ie background) and all 1-pixels and 3-pixels are put to 1(ie foreground pixels)
# mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
# interested_cluster_indices = np.where(mask2 == 1)
# tempimg = self.rgb_image*mask2[:,:,np.newaxis]
cv2.imshow("Segmentation",tempimg)
cv2.waitKey(5)
roi_points = []
for i,j in zip(interested_cluster_indices[0], interested_cluster_indices[1]):
pix_point =np.zeros(2)
pix_point[0] = j
pix_point[1] = i
point_temp = self.img_to_world(pix_point)
if np.isnan(point_temp).sum() == 0:
roi_points.append(point_temp)
roi_points = np.array(roi_points).reshape(-1,3)
# Remove the possible outliers
# roi_points = roi_points | if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
cv2.rectangle(tempimg,(self.ix1,self.iy1),(self.ix2,self.iy2),(0,255,0),2)
if self.rect_done:
center_point = self.get_center_point()
cv2.circle(tempimg, tuple(center_point.astype(int)), 3, (0,0,255),-1) | conditional_block |
Draft.d.ts | ();
* ```
*
* **Querying drafts**
*
* ```javascript
* // query a list of drafts in the inbox with the tag "blue"
* let drafts = Draft.query("", "inbox", ["blue"])
* ```
*/
declare class Draft {
/**
* Create new instance.
*/
constructor()
/**
* Unique identifier.
*/
readonly uuid: string
/**
* The full text content.
*/
content: string
/**
* The first line. | readonly title: string
/**
* Generally, the first line of the draft, but cleaned up as it would be displayed in the draft list in the user interface, removing Markdown header characters, etc.
*/
readonly displayTitle: string
/**
     * The lines of content separated into an array on `\n` line feeds. This is a convenience method, equivalent to `content.split('\n');`
*/
readonly lines: [string]
/**
* Convenience method to filter lines of a draft, returning only the lines that begin with a certain string.
*/
linesWithPrefex(prefix: string): [string]
/**
* Convenience method to scan the draft content for valid URLs, and return all found URLs as an array. This will return valid full URL strings - both for `http(s)` and custom URLs found in the text.
*/
readonly urls: [string]
/**
     * Return a trimmed display version of the "body" of the draft (content after first line), similar to what is displayed as a preview in the draft list.
*/
bodyPreview(maxLength: number): string
/**
* @category Deprecated
* @deprecated use `syntax` property.
*/
languageGrammar:
| 'Plain Text'
| 'Markdown'
| 'Taskpaper'
| 'JavaScript'
| 'Simple List'
| 'MultiMarkdown'
| 'GitHub Markdown'
/**
* The syntax definition used when displaying this draft in the editor.
*/
syntax: Syntax
/**
* The index location in the string of the beginning of the last text selection.
*/
readonly selectionStart: number
/**
* The length of the last text selection.
*/
readonly selectionLength: number
/**
* Array of string tag names assigned.
* @category Tag
*/
readonly tags: string[]
/**
     * Is the draft currently in the archive. If `false`, the draft is in the inbox.
*/
isArchived: boolean
/**
* Is the draft currently in the trash.
*/
isTrashed: boolean
/**
* Current flagged status.
*/
isFlagged: boolean
/**
     * Date the draft was created. This property is generally maintained by Drafts automatically and it is not recommended it be set directly unless needed to maintain information from an external source when importing.
* @category Date
*/
createdAt: Date
/**
* Numeric longitude where the draft was created. This value will be `0` if no location information was available.
* @category Location
*/
createdLongitude: number
/**
* Numeric latitude where the draft was created. This value will be `0` if no location information was available.
* @category Location
*/
createdLatitude: number
/**
     * Date the draft was last modified. This property is generally maintained by Drafts automatically and it is not recommended it be set directly unless needed to maintain information from an external source when importing.
* @category Date
*/
modifiedAt: Date
/**
* Numeric longitude where the draft was last modified. This value will be `0` if no location information was available.
* @category Location
*/
modifiedLongitude: number
/**
     * Numeric latitude where the draft was last modified. This value will be `0` if no location information was available.
* @category Location
*/
modifiedLatitude: number
/**
* URL which can be used to open the draft. URLs are cross-platform, but specific to an individual user's drafts datastore.
*/
readonly permalink: string
/**
* Save changes made to the draft to the database. _`update()` must be called to save changes made to a draft._
*/
update(): void
/**
* Assign a tag
* @category Tag
*/
addTag(tag: string): void
/**
* Remove a tag if it is assigned to the draft.
* @category Tag
*/
removeTag(tag: string): void
/**
* Check whether a tag is currently assigned to the draft.
* @category Tag
*/
hasTag(tag: string): boolean
/**
* Runs the template string through the [Drafts Template](https://docs.getdrafts.com/docs/actions/templates/drafts-templates) engine to evaluate tags.
* @category Template
*/
processTemplate(template: string): string
/**
* Runs the template string through the [Mustache template](https://docs.getdrafts.com/docs/actions/templates/mustache-templates) engine to evaluate tags. Allows additional values and partials to be provided to the context.
* @param template Template string
* @param additionalValues An object containing additional values you wish to make available in the Mustache context.
* @param partials An object containing string keys and values which will contain additional templates you which to make available for use as partials and layouts.
* @category Template
*/
processMustacheTemplate(template: string, additionalValues: Object, partials: Object): string
/**
* Set a custom template tag value for use in templates. For example, calling `setTemplateTag("mytag", "mytext")` will create a tag `[[mytag]]`, which subsequent action steps in the same action can use in their templates. These values are also available in Mustache templates, but as `{{mytag}}`.
* @category Template
*/
setTemplateTag(tagName: string, value: string): void
/**
* Get the current value of a custom template tag.
* @category Template
*/
getTemplateTag(tagName: string): string
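    /*
     * Example (illustrative): custom tags set on a draft become available to later template
     * evaluation in the same action.
     *
     *   let d = Draft.create()
     *   d.setTemplateTag("mytag", "Hello")
     *   d.processTemplate("[[mytag]] world") // => "Hello world"
     */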
/**
* Append text to the end of the draft's `content`. This is a convenience function.
* @param text The text to append
* @param separator An optional separator string to use between content and added text. Defaults to a single line feed.
*/
append(text: string, separator?: string): void
/**
* Prepend text to the beginning of the draft's `content`. This is a convenience function.
* @param text The text to prepend
* @param separator An optional separator string to use between content and added text. Defaults to a single line feed.
*/
prepend(text: string, separator?: string): void
/**
* Insert text into the draft's `content` at the line indicated. This is a convenience function.
     * @param text The text to insert
* @param line The index of the line number at which to insert the text. Line numbers are zero-based, so `0` is the first line. Drafts will range-check the line.
*/
insert(text: string, line: number): void
/**
* Array of versions representing the entire saved version history for this draft.
* @category ActionLog
*/
readonly actionLogs: ActionLog[]
/**
* Array of versions representing the entire saved version history for this draft.
* @category Version
*/
readonly versions: Version[]
/**
* Create a version in the version history representing the current state of the draft.
* @category Version
*/
saveVersion()
/**
* Create a new draft object. This is an in-memory object only, unless "update()" is called to save the draft.
*/
static create(): Draft
/**
* Find an existing draft based on UUID.
* @category Querying
*/
static find(uuid: string): Draft
/**
* Perform a search for drafts and return an array of matching draft objects.
* @param queryString Search string, as you would type in the search box in the draft list. Will find only drafts with a matching string in their contents. Use empty string (`""`) not to filter.
* @param filter Filter by one of the allowed values
* @param tags Results will only include drafts with all the listed tags assigned.
* @param omitTags Results will omit drafts with any of these tags assigned.
* @param sort
* @param sortDescending If `true`, sort descending. Defaults to `false`.
* @param sortFlaggedToTop If `true`, sort flagged drafts to beginning. Defaults to `false`.
* @category Querying
*/
static query(
queryString: string,
filter: 'inbox' | 'archive' | 'flagged' | 'trash' | 'all',
tags: string[],
omitTags: string[],
sort: sortBy,
sortDescending: boolean,
sortFlaggedToTop: boolean
): Draft[]
/**
* Search for drafts containing the title string in the first line of their content. This mimics the logic used by the `/open?title=Title` URL scheme to locate drafts by title when triggering embedded [cross-links](https://docs.getdrafts.com/docs/drafts/cross-linking).
* @category Querying
*/
static queryByTitle(title: string): Draft[]
/**
| */ | random_line_split |
Draft.d.ts | * ```
*
* **Querying drafts**
*
* ```javascript
* // query a list of drafts in the inbox with the tag "blue"
* let drafts = Draft.query("", "inbox", ["blue"])
* ```
*/
declare class | {
/**
* Create new instance.
*/
constructor()
/**
* Unique identifier.
*/
readonly uuid: string
/**
* The full text content.
*/
content: string
/**
* The first line.
*/
readonly title: string
/**
* Generally, the first line of the draft, but cleaned up as it would be displayed in the draft list in the user interface, removing Markdown header characters, etc.
*/
readonly displayTitle: string
/**
     * The lines of content separated into an array on `\n` line feeds. This is a convenience method, equivalent to `content.split('\n');`
*/
readonly lines: [string]
/**
* Convenience method to filter lines of a draft, returning only the lines that begin with a certain string.
*/
linesWithPrefex(prefix: string): [string]
/**
* Convenience method to scan the draft content for valid URLs, and return all found URLs as an array. This will return valid full URL strings - both for `http(s)` and custom URLs found in the text.
*/
readonly urls: [string]
/**
     * Return a trimmed display version of the "body" of the draft (content after first line), similar to what is displayed as a preview in the draft list.
*/
bodyPreview(maxLength: number): string
/**
* @category Deprecated
* @deprecated use `syntax` property.
*/
languageGrammar:
| 'Plain Text'
| 'Markdown'
| 'Taskpaper'
| 'JavaScript'
| 'Simple List'
| 'MultiMarkdown'
| 'GitHub Markdown'
/**
* The syntax definition used when displaying this draft in the editor.
*/
syntax: Syntax
/**
* The index location in the string of the beginning of the last text selection.
*/
readonly selectionStart: number
/**
* The length of the last text selection.
*/
readonly selectionLength: number
/**
* Array of string tag names assigned.
* @category Tag
*/
readonly tags: string[]
/**
     * Is the draft currently in the archive. If `false`, the draft is in the inbox.
*/
isArchived: boolean
/**
* Is the draft currently in the trash.
*/
isTrashed: boolean
/**
* Current flagged status.
*/
isFlagged: boolean
/**
     * Date the draft was created. This property is generally maintained by Drafts automatically and it is not recommended it be set directly unless needed to maintain information from an external source when importing.
* @category Date
*/
createdAt: Date
/**
* Numeric longitude where the draft was created. This value will be `0` if no location information was available.
* @category Location
*/
createdLongitude: number
/**
* Numeric latitude where the draft was created. This value will be `0` if no location information was available.
* @category Location
*/
createdLatitude: number
/**
     * Date the draft was last modified. This property is generally maintained by Drafts automatically and it is not recommended it be set directly unless needed to maintain information from an external source when importing.
* @category Date
*/
modifiedAt: Date
/**
* Numeric longitude where the draft was last modified. This value will be `0` if no location information was available.
* @category Location
*/
modifiedLongitude: number
/**
     * Numeric latitude where the draft was last modified. This value will be `0` if no location information was available.
* @category Location
*/
modifiedLatitude: number
/**
* URL which can be used to open the draft. URLs are cross-platform, but specific to an individual user's drafts datastore.
*/
readonly permalink: string
/**
* Save changes made to the draft to the database. _`update()` must be called to save changes made to a draft._
*/
update(): void
/**
* Assign a tag
* @category Tag
*/
addTag(tag: string): void
/**
* Remove a tag if it is assigned to the draft.
* @category Tag
*/
removeTag(tag: string): void
/**
* Check whether a tag is currently assigned to the draft.
* @category Tag
*/
hasTag(tag: string): boolean
/**
* Runs the template string through the [Drafts Template](https://docs.getdrafts.com/docs/actions/templates/drafts-templates) engine to evaluate tags.
* @category Template
*/
processTemplate(template: string): string
/**
* Runs the template string through the [Mustache template](https://docs.getdrafts.com/docs/actions/templates/mustache-templates) engine to evaluate tags. Allows additional values and partials to be provided to the context.
* @param template Template string
* @param additionalValues An object containing additional values you wish to make available in the Mustache context.
* @param partials An object containing string keys and values which will contain additional templates you which to make available for use as partials and layouts.
* @category Template
*/
processMustacheTemplate(template: string, additionalValues: Object, partials: Object): string
/**
* Set a custom template tag value for use in templates. For example, calling `setTemplateTag("mytag", "mytext")` will create a tag `[[mytag]]`, which subsequent action steps in the same action can use in their templates. These values are also available in Mustache templates, but as `{{mytag}}`.
* @category Template
*/
setTemplateTag(tagName: string, value: string): void
/**
* Get the current value of a custom template tag.
* @category Template
*/
getTemplateTag(tagName: string): string
/**
* Append text to the end of the draft's `content`. This is a convenience function.
* @param text The text to append
* @param separator An optional separator string to use between content and added text. Defaults to a single line feed.
*/
append(text: string, separator?: string): void
/**
* Prepend text to the beginning of the draft's `content`. This is a convenience function.
* @param text The text to prepend
* @param separator An optional separator string to use between content and added text. Defaults to a single line feed.
*/
prepend(text: string, separator?: string): void
/**
* Insert text into the draft's `content` at the line indicated. This is a convenience function.
     * @param text The text to insert
* @param line The index of the line number at which to insert the text. Line numbers are zero-based, so `0` is the first line. Drafts will range-check the line.
*/
insert(text: string, line: number): void
/**
* Array of versions representing the entire saved version history for this draft.
* @category ActionLog
*/
readonly actionLogs: ActionLog[]
/**
* Array of versions representing the entire saved version history for this draft.
* @category Version
*/
readonly versions: Version[]
/**
* Create a version in the version history representing the current state of the draft.
* @category Version
*/
saveVersion()
/**
* Create a new draft object. This is an in-memory object only, unless "update()" is called to save the draft.
*/
static create(): Draft
/**
* Find an existing draft based on UUID.
* @category Querying
*/
static find(uuid: string): Draft
/**
* Perform a search for drafts and return an array of matching draft objects.
* @param queryString Search string, as you would type in the search box in the draft list. Will find only drafts with a matching string in their contents. Use empty string (`""`) not to filter.
* @param filter Filter by one of the allowed values
* @param tags Results will only include drafts with all the listed tags assigned.
* @param omitTags Results will omit drafts with any of these tags assigned.
* @param sort
* @param sortDescending If `true`, sort descending. Defaults to `false`.
* @param sortFlaggedToTop If `true`, sort flagged drafts to beginning. Defaults to `false`.
* @category Querying
*/
static query(
queryString: string,
filter: 'inbox' | 'archive' | 'flagged' | 'trash' | 'all',
tags: string[],
omitTags: string[],
sort: sortBy,
sortDescending: boolean,
sortFlaggedToTop: boolean
): Draft[]
/**
* Search for drafts containing the title string in the first line of their content. This mimics the logic used by the `/open?title=Title` URL scheme to locate drafts by title when triggering embedded [cross-links](https://docs.getdrafts.com/docs/drafts/cross-linking).
* @category Querying
*/
static queryByTitle(title: string): Draft[]
/**
| Draft | identifier_name |
client.go | Result, error)
Close()
}
type Client struct {
cfg *Config
pool *Pool
stopChan chan struct{}
notifyPollingChan chan *PollItem
polling *Polling
closed int32
logDetail bool
logInterval Duration
working int32
waitChan chan struct{}
}
func NewClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = new(Config)
}
cfg.init()
pool, err := NewPool(cfg.Host, cfg.UserName, cfg.Password, cfg.DBName, cfg.MinSessionCount, cfg.MaxSessionCount)
if err != nil {
return nil, err
}
cli := &Client{
cfg: cfg,
pool: pool,
polling: NewPolling(),
waitChan: make(chan struct{}),
logDetail: cfg.LogDetail,
}
for i := 0; i < cfg.PollingThread; i++ {
go cli.pollingLoop()
}
return cli, nil
}
func (c *Client) AddPollingWithCallback(op *Operation, f func(*PollingResult)) {
c.polling.AddCallback(op, f)
}
func (c *Client) AddPolling(op *Operation) chan *PollingResult {
return c.polling.Add(op)
}
func (c *Client) pollingLoop() {
timeout := time.Second
timer := time.NewTimer(timeout)
	// exit quickly on close; in-flight polling tasks are not waited on.
loop:
for atomic.LoadInt32(&c.closed) == 0 {
poll := c.polling.Poll()
if poll == nil {
timer.Reset(c.cfg.BackoffTime.Unwrap())
select {
case <-c.polling.NewPollingChan:
case <-c.stopChan:
break loop
case <-timer.C:
}
continue
}
session, ok := c.pool.Require()
if !ok {
break loop
}
state, err := session.GetState(poll.Op)
c.polling.SubStaging()
if err != nil {
log.Debug("session getState error : ", err)
// invalid handle ? ignore
c.pool.Release(session)
continue
}
if !state.IsCompleted() {
poll.MarkBackoff(c.cfg.BackoffTime.Unwrap())
c.polling.AddItem(poll)
c.pool.Release(session)
continue
}
var result PollingResult
if state.IsSuccess() {
// success, schema is not required
schema, _ := session.GetSchema(poll.Op)
if schema == nil {
schema = new(Schema)
}
result.Schema = schema
} else {
result.Error = state.GetError()
// error
}
c.pool.Release(session)
// wake up one block SubmitAsync if someone is waiting
select {
case c.waitChan <- struct{}{}:
default:
}
if poll.Callback != nil {
go poll.Callback(&result)
}
if poll.Chan != nil {
poll.Chan <- &result
}
}
}
func (c *Client) Fetch(op *Operation, o Orientation) (*SegmentResult, error) {
session, ok := c.pool.Require()
if !ok {
return nil, ErrClosing
}
result, err := session.FetchResult(op, o, c.cfg.BenchSize)
c.pool.Release(session)
if err != nil {
return nil, err
}
return result, err
}
func (c *Client) ResultSlice(sql string) ([]string, error) {
result, err := c.Execute(sql)
if err != nil {
return nil, err
}
var id string
var ids []string
for result.NextPage() {
for result.NextInPage() {
result.Scan(&id)
ids = append(ids, id)
}
}
if err := result.Err(); err != nil {
return nil, err
}
return ids, nil
}
func (c *Client) ResultCount(sql string) (int64, error) {
result, err := c.Execute(sql)
if err != nil {
return -1, err
}
var count int64
if result.Next() {
result.Scan(&count)
}
if err := result.Err(); err != nil {
return -1, err
}
return count, nil
}
func (c *Client) Execute(statement string) (*ExecuteResult, error) {
return c.ExecuteEx(statement, false)
}
func (c *Client) ExecuteAsyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
type result struct {
hiveResult *ExecuteResult
err error
}
retChan := make(chan *result, 1)
go func() {
if ret, err := c.ExecuteEx(statement, true); err != nil {
retChan <- &result{nil, err}
} else {
retChan <- &result{ret, nil}
}
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case ret := <-retChan:
if ret.err != nil {
return nil, ret.err
} else {
return ret.hiveResult, nil
}
}
}
func (c *Client) ExecuteSyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
type result struct {
hiveResult *ExecuteResult
err error
}
retChan := make(chan *result, 1)
go func() {
if ret, err := c.ExecuteEx(statement, false); err != nil {
retChan <- &result{nil, err}
} else {
retChan <- &result{ret, nil}
}
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case ret := <-retChan:
if ret.err != nil | else {
return ret.hiveResult, nil
}
}
}
func (c *Client) SubmitAsync(statement string) (*ExecuteResult, error) {
return c.ExecuteEx(statement, true)
}
func (c *Client) SubmitAsyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
return c.ExecuteAsyncCtx(ctx, statement)
}
func (c *Client) AddAsync(dimension *HiveContext, statement string, callback func(*ExecuteResult)) error {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
dimension.Add()
ret, err := c.SubmitAsync(statement)
if err != nil {
err = fmt.Errorf("%v: %v", err, statement)
dimension.Error(err)
return err
}
ret.RunOnFinish(func() {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
callback(ret)
dimension.Done()
})
return nil
}
func (c *Client) AddAsyncCtx(ctx context.Context, dimension *HiveContext, statement string, callback func(*ExecuteResult)) error {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
dimension.Add()
ret, err := c.SubmitAsyncCtx(ctx, statement)
if err != nil {
err = fmt.Errorf("%v: %v", err, statement)
dimension.Error(err)
return err
}
ret.RunOnFinish(func() {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
callback(ret)
dimension.Done()
})
return nil
}
func (c *Client) StageSize() int {
return int(atomic.LoadInt32(&c.working))
}
func (c *Client) checkConcurrentLimit() {
if c.polling.Size() > c.cfg.Concurrent {
<-c.waitChan
}
}
func (c *Client) ExecuteEx(statement string, async bool) (*ExecuteResult, error) {
// exceed the max of concurrent size
c.checkConcurrentLimit()
session, ok := c.pool.Require()
if !ok {
return nil, ErrClosing
}
defer c.pool.Release(session)
//if err, _ := session.SubmitEx("SET hive.execution.engine=spark;", false); err != nil {
//log.Error(err)
//}
//_, err := session.Submit("set spark.client.server.connect.timeout=600s;set hive.spark.job.monitor.timeout=600s;set hive.spark.client.connect.timeout=100000;")
//if err != nil {
// log.Error(err)
//}
log.Debug("Execute HQL: ", statement)
op, err := session.SubmitEx(statement, async)
if err != nil {
log.Errorf("ExecuteEx HQL error: %s, %s", statement, err.Error())
return nil, err
}
if c.logDetail {
go c.logResponseLog(op.Handle)
}
	// the hive log printing switch is turned on
return newResult(c, op, statement), nil
}
func (c *Client) Close() {
if !atomic.CompareAndSwapInt32(&c.closed, 0, 1) {
return
}
c.pool.Close()
}
// Print hive logs. Note: this method runs until the hive job finishes (success or failure), so it is recommended to run it in its own goroutine.
func (c *Client) logResponseLog(handle *tcliservice.TOperationHandle) {
start := time.Now()
//每次 | {
return nil, ret.err
} | conditional_block |
client.go | Result, error)
Close()
}
type Client struct {
cfg *Config
pool *Pool
stopChan chan struct{}
notifyPollingChan chan *PollItem
polling *Polling
closed int32
logDetail bool
logInterval Duration
working int32
waitChan chan struct{}
}
func NewClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = new(Config)
}
cfg.init()
pool, err := NewPool(cfg.Host, cfg.UserName, cfg.Password, cfg.DBName, cfg.MinSessionCount, cfg.MaxSessionCount)
if err != nil {
return nil, err
}
cli := &Client{
cfg: cfg,
pool: pool,
polling: NewPolling(),
waitChan: make(chan struct{}),
logDetail: cfg.LogDetail,
}
for i := 0; i < cfg.PollingThread; i++ {
go cli.pollingLoop()
}
return cli, nil
}
func (c *Client) AddPollingWithCallback(op *Operation, f func(*PollingResult)) {
c.polling.AddCallback(op, f)
}
func (c *Client) AddPolling(op *Operation) chan *PollingResult {
return c.polling.Add(op)
}
func (c *Client) pollingLoop() {
timeout := time.Second
timer := time.NewTimer(timeout)
	// exit quickly on close; in-flight polling tasks are not waited on.
loop:
for atomic.LoadInt32(&c.closed) == 0 {
poll := c.polling.Poll()
if poll == nil {
timer.Reset(c.cfg.BackoffTime.Unwrap())
select {
case <-c.polling.NewPollingChan:
case <-c.stopChan:
break loop
case <-timer.C:
}
continue
}
session, ok := c.pool.Require()
if !ok {
break loop
}
state, err := session.GetState(poll.Op)
c.polling.SubStaging()
if err != nil {
log.Debug("session getState error : ", err)
// invalid handle ? ignore
c.pool.Release(session)
continue
}
if !state.IsCompleted() {
poll.MarkBackoff(c.cfg.BackoffTime.Unwrap())
c.polling.AddItem(poll)
c.pool.Release(session)
continue
}
var result PollingResult
if state.IsSuccess() {
// success, schema is not required
schema, _ := session.GetSchema(poll.Op)
if schema == nil {
schema = new(Schema)
}
result.Schema = schema
} else {
result.Error = state.GetError()
// error
}
c.pool.Release(session)
// wake up one block SubmitAsync if someone is waiting
select {
case c.waitChan <- struct{}{}:
default:
}
if poll.Callback != nil {
go poll.Callback(&result)
}
if poll.Chan != nil {
poll.Chan <- &result
}
}
}
func (c *Client) Fetch(op *Operation, o Orientation) (*SegmentResult, error) {
session, ok := c.pool.Require()
if !ok {
return nil, ErrClosing
}
result, err := session.FetchResult(op, o, c.cfg.BenchSize)
c.pool.Release(session)
if err != nil {
return nil, err
}
return result, err
}
func (c *Client) ResultSlice(sql string) ([]string, error) {
result, err := c.Execute(sql)
if err != nil {
return nil, err
}
var id string
var ids []string
for result.NextPage() {
for result.NextInPage() {
result.Scan(&id)
ids = append(ids, id)
}
}
if err := result.Err(); err != nil {
return nil, err
}
return ids, nil
}
func (c *Client) ResultCount(sql string) (int64, error) {
result, err := c.Execute(sql)
if err != nil {
return -1, err
}
var count int64
if result.Next() {
result.Scan(&count)
}
if err := result.Err(); err != nil {
return -1, err
}
return count, nil
}
func (c *Client) Execute(statement string) (*ExecuteResult, error) {
return c.ExecuteEx(statement, false)
}
func (c *Client) ExecuteAsyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
type result struct {
hiveResult *ExecuteResult
err error
}
retChan := make(chan *result, 1)
go func() {
if ret, err := c.ExecuteEx(statement, true); err != nil {
retChan <- &result{nil, err}
} else {
retChan <- &result{ret, nil}
}
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case ret := <-retChan:
if ret.err != nil {
return nil, ret.err
} else {
return ret.hiveResult, nil
}
}
}
func (c *Client) ExecuteSyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
type result struct {
hiveResult *ExecuteResult
err error
}
retChan := make(chan *result, 1)
go func() {
if ret, err := c.ExecuteEx(statement, false); err != nil {
retChan <- &result{nil, err}
} else {
retChan <- &result{ret, nil}
}
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case ret := <-retChan:
if ret.err != nil {
return nil, ret.err
} else {
return ret.hiveResult, nil
}
}
}
func (c *Client) SubmitAsync(statement string) (*ExecuteResult, error) {
return c.ExecuteEx(statement, true)
}
func (c *Client) SubmitAsyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
return c.ExecuteAsyncCtx(ctx, statement)
}
func (c *Client) AddAsync(dimension *HiveContext, statement string, callback func(*ExecuteResult)) error {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
dimension.Add()
ret, err := c.SubmitAsync(statement)
if err != nil {
err = fmt.Errorf("%v: %v", err, statement)
dimension.Error(err)
return err
}
ret.RunOnFinish(func() {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
callback(ret)
dimension.Done()
})
return nil
}
func (c *Client) AddAsyncCtx(ctx context.Context, dimension *HiveContext, statement string, callback func(*ExecuteResult)) error {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
dimension.Add()
ret, err := c.SubmitAsyncCtx(ctx, statement)
if err != nil {
err = fmt.Errorf("%v: %v", err, statement)
dimension.Error(err)
return err
}
ret.RunOnFinish(func() {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
callback(ret)
dimension.Done()
})
return nil
}
func (c *Client) StageSize() int {
return int(atomic.LoadInt32(&c.working))
}
func (c *Client) checkConcurrentLimit() {
if c.polling.Size() > c.cfg.Concurrent {
<-c.waitChan
}
}
func (c *Client) ExecuteEx(statement string, async bool) (*ExecuteResult, error) {
// exceed the max of concurrent size
c.checkConcurrentLimit()
session, ok := c.pool.Require()
if !ok {
return nil, ErrClosing
}
defer c.pool.Release(session)
//if err, _ := session.SubmitEx("SET hive.execution.engine=spark;", false); err != nil {
//log.Error(err)
//}
//_, err := session.Submit("set spark.client.server.connect.timeout=600s;set hive.spark.job.monitor.timeout=600s;set hive.spark.client.connect.timeout=100000;")
//if err != nil {
// log.Error(err)
//}
log.Debug("Execute HQL: ", statement)
op, err := session.SubmitEx(statement, async)
if err != nil {
log.Errorf("ExecuteEx HQL error: %s, %s", statement, err.Error())
return nil, err
}
if c.logDetail {
go c.logResponseLog(op.Handle)
}
	// the hive log printing switch is turned on
return newResult(c, op, statement), nil
}
func (c *Client) Close() {
if !ato | ompareAndSwapInt32(&c.closed, 0, 1) {
return
}
c.pool.Close()
}
// Print hive logs. Note: this method runs until the hive job finishes (success or failure), so it is recommended to run it in its own goroutine.
func (c *Client) logResponseLog(handle *tcliservice.TOperationHandle) {
start := time.Now()
| mic.C | identifier_name |
client.go | ExecuteResult, error)
Close()
}
type Client struct {
cfg *Config
pool *Pool
stopChan chan struct{}
notifyPollingChan chan *PollItem
polling *Polling
closed int32
logDetail bool
logInterval Duration
working int32
waitChan chan struct{}
}
func NewClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = new(Config)
}
cfg.init()
pool, err := NewPool(cfg.Host, cfg.UserName, cfg.Password, cfg.DBName, cfg.MinSessionCount, cfg.MaxSessionCount)
if err != nil {
return nil, err
}
cli := &Client{
cfg: cfg,
pool: pool,
polling: NewPolling(),
waitChan: make(chan struct{}),
logDetail: cfg.LogDetail,
}
for i := 0; i < cfg.PollingThread; i++ {
go cli.pollingLoop()
}
return cli, nil
}
func (c *Client) AddPollingWithCallback(op *Operation, f func(*PollingResult)) {
c.polling.AddCallback(op, f)
}
func (c *Client) AddPolling(op *Operation) chan *PollingResult {
return c.polling.Add(op)
}
func (c *Client) pollingLoop() {
timeout := time.Second
timer := time.NewTimer(timeout)
	// exit quickly on close; in-flight polling tasks are not waited on.
loop:
for atomic.LoadInt32(&c.closed) == 0 {
poll := c.polling.Poll()
if poll == nil {
timer.Reset(c.cfg.BackoffTime.Unwrap())
select {
case <-c.polling.NewPollingChan:
case <-c.stopChan:
break loop
case <-timer.C:
}
continue
}
session, ok := c.pool.Require()
if !ok {
break loop
}
state, err := session.GetState(poll.Op)
c.polling.SubStaging()
if err != nil {
log.Debug("session getState error : ", err)
// invalid handle ? ignore
c.pool.Release(session)
continue
}
if !state.IsCompleted() {
poll.MarkBackoff(c.cfg.BackoffTime.Unwrap())
c.polling.AddItem(poll)
c.pool.Release(session)
continue
}
var result PollingResult
if state.IsSuccess() {
// success, schema is not required
schema, _ := session.GetSchema(poll.Op)
if schema == nil {
schema = new(Schema)
}
result.Schema = schema
} else {
result.Error = state.GetError()
// error
}
c.pool.Release(session)
// wake up one block SubmitAsync if someone is waiting
select {
case c.waitChan <- struct{}{}:
default:
}
if poll.Callback != nil {
go poll.Callback(&result)
}
if poll.Chan != nil {
poll.Chan <- &result
}
}
}
func (c *Client) Fetch(op *Operation, o Orientation) (*SegmentResult, error) {
session, ok := c.pool.Require()
if !ok {
return nil, ErrClosing
}
result, err := session.FetchResult(op, o, c.cfg.BenchSize)
c.pool.Release(session)
if err != nil {
return nil, err
}
return result, err
}
func (c *Client) ResultSlice(sql string) ([]string, error) {
result, err := c.Execute(sql)
if err != nil {
return nil, err
}
var id string
var ids []string
for result.NextPage() {
for result.NextInPage() {
result.Scan(&id)
ids = append(ids, id)
}
}
if err := result.Err(); err != nil {
return nil, err
}
return ids, nil
}
func (c *Client) ResultCount(sql string) (int64, error) {
result, err := c.Execute(sql)
if err != nil {
return -1, err
}
var count int64
if result.Next() {
result.Scan(&count)
}
if err := result.Err(); err != nil {
return -1, err
}
return count, nil
}
func (c *Client) Execute(statement string) (*ExecuteResult, error) {
return c.ExecuteEx(statement, false)
}
func (c *Client) ExecuteAsyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
type result struct {
hiveResult *ExecuteResult
err error
}
retChan := make(chan *result, 1)
go func() {
if ret, err := c.ExecuteEx(statement, true); err != nil {
retChan <- &result{nil, err}
} else {
retChan <- &result{ret, nil}
}
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case ret := <-retChan:
if ret.err != nil {
return nil, ret.err
} else {
return ret.hiveResult, nil
}
}
}
func (c *Client) ExecuteSyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
| retChan := make(chan *result, 1)
go func() {
if ret, err := c.ExecuteEx(statement, false); err != nil {
retChan <- &result{nil, err}
} else {
retChan <- &result{ret, nil}
}
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case ret := <-retChan:
if ret.err != nil {
return nil, ret.err
} else {
return ret.hiveResult, nil
}
}
}
func (c *Client) SubmitAsync(statement string) (*ExecuteResult, error) {
return c.ExecuteEx(statement, true)
}
func (c *Client) SubmitAsyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
return c.ExecuteAsyncCtx(ctx, statement)
}
func (c *Client) AddAsync(dimension *HiveContext, statement string, callback func(*ExecuteResult)) error {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
dimension.Add()
ret, err := c.SubmitAsync(statement)
if err != nil {
err = fmt.Errorf("%v: %v", err, statement)
dimension.Error(err)
return err
}
ret.RunOnFinish(func() {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
callback(ret)
dimension.Done()
})
return nil
}
func (c *Client) AddAsyncCtx(ctx context.Context, dimension *HiveContext, statement string, callback func(*ExecuteResult)) error {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
dimension.Add()
ret, err := c.SubmitAsyncCtx(ctx, statement)
if err != nil {
err = fmt.Errorf("%v: %v", err, statement)
dimension.Error(err)
return err
}
ret.RunOnFinish(func() {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
callback(ret)
dimension.Done()
})
return nil
}
func (c *Client) StageSize() int {
return int(atomic.LoadInt32(&c.working))
}
func (c *Client) checkConcurrentLimit() {
if c.polling.Size() > c.cfg.Concurrent {
<-c.waitChan
}
}
func (c *Client) ExecuteEx(statement string, async bool) (*ExecuteResult, error) {
// exceed the max of concurrent size
c.checkConcurrentLimit()
session, ok := c.pool.Require()
if !ok {
return nil, ErrClosing
}
defer c.pool.Release(session)
//if err, _ := session.SubmitEx("SET hive.execution.engine=spark;", false); err != nil {
//log.Error(err)
//}
//_, err := session.Submit("set spark.client.server.connect.timeout=600s;set hive.spark.job.monitor.timeout=600s;set hive.spark.client.connect.timeout=100000;")
//if err != nil {
// log.Error(err)
//}
log.Debug("Execute HQL: ", statement)
op, err := session.SubmitEx(statement, async)
if err != nil {
log.Errorf("ExecuteEx HQL error: %s, %s", statement, err.Error())
return nil, err
}
if c.logDetail {
go c.logResponseLog(op.Handle)
}
	// the hive log printing switch is turned on
return newResult(c, op, statement), nil
}
func (c *Client) Close() {
if !atomic.CompareAndSwapInt32(&c.closed, 0, 1) {
return
}
c.pool.Close()
}
// Print hive logs. Note: this method runs until the hive job finishes (success or failure), so it is recommended to run it in its own goroutine.
func (c *Client) logResponseLog(handle *tcliservice.TOperationHandle) {
start := time.Now()
//每次 | type result struct {
hiveResult *ExecuteResult
err error
} | random_line_split |
client.go | ExecuteResult, error)
Close()
}
type Client struct {
cfg *Config
pool *Pool
stopChan chan struct{}
notifyPollingChan chan *PollItem
polling *Polling
closed int32
logDetail bool
logInterval Duration
working int32
waitChan chan struct{}
}
func NewClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = new(Config)
}
cfg.init()
pool, err := NewPool(cfg.Host, cfg.UserName, cfg.Password, cfg.DBName, cfg.MinSessionCount, cfg.MaxSessionCount)
if err != nil {
return nil, err
}
cli := &Client{
cfg: cfg,
pool: pool,
polling: NewPolling(),
waitChan: make(chan struct{}),
logDetail: cfg.LogDetail,
}
for i := 0; i < cfg.PollingThread; i++ {
go cli.pollingLoop()
}
return cli, nil
}
func (c *Client) AddPollingWithCallback(op *Operation, f func(*PollingResult)) {
c.polling.AddCallback(op, f)
}
func (c *Client) AddPolling(op *Operation) chan *PollingResult {
return c.polling.Add(op)
}
func (c *Client) pollingLoop() {
timeout := time.Second
timer := time.NewTimer(timeout)
	// exit quickly on close; in-flight polling tasks are not waited on.
loop:
for atomic.LoadInt32(&c.closed) == 0 {
poll := c.polling.Poll()
if poll == nil {
timer.Reset(c.cfg.BackoffTime.Unwrap())
select {
case <-c.polling.NewPollingChan:
case <-c.stopChan:
break loop
case <-timer.C:
}
continue
}
session, ok := c.pool.Require()
if !ok {
break loop
}
state, err := session.GetState(poll.Op)
c.polling.SubStaging()
if err != nil {
log.Debug("session getState error : ", err)
// invalid handle ? ignore
c.pool.Release(session)
continue
}
if !state.IsCompleted() {
poll.MarkBackoff(c.cfg.BackoffTime.Unwrap())
c.polling.AddItem(poll)
c.pool.Release(session)
continue
}
var result PollingResult
if state.IsSuccess() {
// success, schema is not required
schema, _ := session.GetSchema(poll.Op)
if schema == nil {
schema = new(Schema)
}
result.Schema = schema
} else {
result.Error = state.GetError()
// error
}
c.pool.Release(session)
// wake up one block SubmitAsync if someone is waiting
select {
case c.waitChan <- struct{}{}:
default:
}
if poll.Callback != nil {
go poll.Callback(&result)
}
if poll.Chan != nil {
poll.Chan <- &result
}
}
}
func (c *Client) Fetch(op *Operation, o Orientation) (*SegmentResult, error) {
session, ok := c.pool.Require()
if !ok {
return nil, ErrClosing
}
result, err := session.FetchResult(op, o, c.cfg.BenchSize)
c.pool.Release(session)
if err != nil {
return nil, err
}
return result, err
}
func (c *Client) ResultSlice(sql string) ([]string, error) {
result, err := c.Execute(sql)
if err != nil {
return nil, err
}
var id string
var ids []string
for result.NextPage() {
for result.NextInPage() {
result.Scan(&id)
ids = append(ids, id)
}
}
if err := result.Err(); err != nil {
return nil, err
}
return ids, nil
}
func (c *Client) ResultCount(sql string) (int64, error) {
result, err := c.Execute(sql)
if err != nil {
return -1, err
}
var count int64
if result.Next() {
result.Scan(&count)
}
if err := result.Err(); err != nil {
return -1, err
}
return count, nil
}
func (c *Client) Execute(statement string) (*ExecuteResult, error) {
return c.ExecuteEx(statement, false)
}
func (c *Client) ExecuteAsyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
type result struct {
hiveResult *ExecuteResult
err error
}
retChan := make(chan *result, 1)
go func() {
if ret, err := c.ExecuteEx(statement, true); err != nil {
retChan <- &result{nil, err}
} else {
retChan <- &result{ret, nil}
}
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case ret := <-retChan:
if ret.err != nil {
return nil, ret.err
} else {
return ret.hiveResult, nil
}
}
}
func (c *Client) ExecuteSyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
type result struct {
hiveResult *ExecuteResult
err error
}
retChan := make(chan *result, 1)
go func() {
if ret, err := c.ExecuteEx(statement, false); err != nil {
retChan <- &result{nil, err}
} else {
retChan <- &result{ret, nil}
}
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case ret := <-retChan:
if ret.err != nil {
return nil, ret.err
} else {
return ret.hiveResult, nil
}
}
}
func (c *Client) SubmitAsync(statement string) (*ExecuteResult, error) {
return c.ExecuteEx(statement, true)
}
func (c *Client) SubmitAsyncCtx(ctx context.Context, statement string) (*ExecuteResult, error) {
return c.ExecuteAsyncCtx(ctx, statement)
}
func (c *Client) AddAsync(dimension *HiveContext, statement string, callback func(*ExecuteResult)) error {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
dimension.Add()
ret, err := c.SubmitAsync(statement)
if err != nil {
err = fmt.Errorf("%v: %v", err, statement)
dimension.Error(err)
return err
}
ret.RunOnFinish(func() {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
callback(ret)
dimension.Done()
})
return nil
}
func (c *Client) AddAsyncCtx(ctx context.Context, dimension *HiveContext, statement string, callback func(*ExecuteResult)) error {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
dimension.Add()
ret, err := c.SubmitAsyncCtx(ctx, statement)
if err != nil {
err = fmt.Errorf("%v: %v", err, statement)
dimension.Error(err)
return err
}
ret.RunOnFinish(func() {
if dimension.HasError() {
log.Error("hive context has error,please check")
}
callback(ret)
dimension.Done()
})
return nil
}
func (c *Client) StageSize() int {
return int(atomic.LoadInt32(&c.working))
}
func (c *Client) checkConcurrentLimit() {
if c.polling.Size() > c.cfg.Concurrent {
<-c.waitChan
}
}
func (c *Client) ExecuteEx(statement string, async bool) (*ExecuteResult, error) | log.Debug("Execute HQL: ", statement)
op, err := session.SubmitEx(statement, async)
if err != nil {
log.Errorf("ExecuteEx HQL error: %s, %s", statement, err.Error())
return nil, err
}
if c.logDetail {
go c.logResponseLog(op.Handle)
}
	// the hive log printing switch is turned on
return newResult(c, op, statement), nil
}
func (c *Client)
Close() {
if !atomic.CompareAndSwapInt32(&c.closed, 0, 1) {
return
}
c.pool.Close()
}
// Print hive logs. Note: this method runs until the hive job finishes (success or failure), so it is recommended to run it in its own goroutine.
func (c *Client) logResponseLog(handle *tcliservice.TOperationHandle) {
start := time.Now()
//每次 | {
// exceed the max of concurrent size
c.checkConcurrentLimit()
session, ok := c.pool.Require()
if !ok {
return nil, ErrClosing
}
defer c.pool.Release(session)
//if err, _ := session.SubmitEx("SET hive.execution.engine=spark;", false); err != nil {
//log.Error(err)
//}
//_, err := session.Submit("set spark.client.server.connect.timeout=600s;set hive.spark.job.monitor.timeout=600s;set hive.spark.client.connect.timeout=100000;")
//if err != nil {
// log.Error(err)
//}
| identifier_body |
grid-ref.ts | since some browsers will
// // // blur the elements, for sub-pixel transforms.
// // const offsetXPx = coerceCssPixelValue(Math.round(offset.x));
// // const offsetYPx = coerceCssPixelValue(Math.round(offset.y));
// // elementToOffset.style.transform = `translate3d(${offsetXPx}, ${offsetYPx}, 0)`;
// // adjustClientRect(sibling.clientRect, offset.y, offset.x);
// // }, siblings);
// // Note that it's important that we do this after the client rects have been adjusted.
// // this._previousSwap.overlaps = isInsideClientRect(newPosition, pointerX, pointerY);
// // this._previousSwap.drag = siblingAtNewPosition.drag;
// // this._previousSwap.delta = pointerDelta;
// }
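    /**
     * Re-inserts the visible elements of the active draggables before the sibling node that
     * corresponds to the hovered position, then re-reads the resulting DOM index into `dragIndex`.
     * Note: the hover index is derived from the DOM via `utils.findIndex`; the `newIndex`
     * argument is currently unused.
     */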
public sort(dragElement: HTMLElement, currentIndex: number, newIndex: number): void {
console.log(currentIndex);
const hoverIndex = utils.findIndex(dragElement);
const el = this.getSibling(dragElement, currentIndex, hoverIndex);
this._activeDraggables.forEach(drag => {
const insertedNode = dragElement.parentNode.insertBefore(drag.getVisibleElement(), el.node);
// this._class.addPlaceHolderClass(insertedNode as Element);
});
this.dragIndex = utils.findIndex(this._activeDraggables[0].getVisibleElement());
}
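    /**
     * Picks the DOM node to insert before: when dragging forward (dragIndex < hoverIndex) the
     * drop element's next sibling is returned so the dragged item lands after it; otherwise
     * the drop element itself is returned.
     */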
private getSibling(dropElement: any, dragIndex: number, hoverIndex: number): any | null {
if (dragIndex < hoverIndex) {
return { node: dropElement.nextSibling, originalIndex: hoverIndex + 1 };
}
return { node: dropElement, originalIndex: hoverIndex };
}
/**
* Gets the index of an item in the drop container, based on the position of the user's pointer.
* @param item Item that is being sorted.
* @param pointerX Position of the user's pointer along the X axis.
* @param pointerY Position of the user's pointer along the Y axis.
* @param delta Direction in which the user is moving their pointer.
*/
// protected _getItemIndexFromPointerPosition(item: GridItemRef, pointerX: number, pointerY: number,
// delta?: { x: number, y: number }): number {
// const element = document.elementFromPoint(pointerX, pointerY);
// function __findIndex(element: Element): number {
// const allElements = element.parentElement.children;
// return Array.prototype.indexOf.call(allElements, element);
// }
// console.log(__findIndex(element));
// return __findIndex(element);
// console.log();
// const itemPositions: CachedGridItemPosition[] = this._getCachedGridItemPositions(pointerY);
// let _rowOffset: number = 0;
// let index = findIndex(itemPositions, ({ drag, clientRect, rowOffset }, _, array) => {
// if (drag === item) {
// // If there's only one item left in the container, it must be
// // the dragged item itself so we use it as a reference.
// return array.length < 2;
// }
// if (delta) {
// const direction = delta;
// // If the user is still hovering over the same item as last time, their cursor hasn't left
// // the item after we made the swap, and they didn't change the direction in which they're
// // dragging, we don't consider it a direction swap.
// if (drag === this._previousSwap.drag && this._previousSwap.overlaps &&
// direction.x === this._previousSwap.delta.x && direction.y === this._previousSwap.delta.y) {
// return false;
// }
// }
// _rowOffset = rowOffset;
// return pointerX >= Math.floor(clientRect.left) && pointerX < Math.floor(clientRect.right) &&
// pointerY >= Math.floor(clientRect.top) && pointerY < Math.floor(clientRect.bottom);
// });
// return (index === -1 || !this.sortPredicate(index, item, this)) ? -1 : _rowOffset;
// }
protected _reset(): void {
super._reset();
this._previousSwap.delta = { x: 0, y: 0 };
this._grid.clear();
}
/**
* Gets the offset in pixels by which the item that is being dragged should be moved.
* @param currentPosition Current position of the item.
* @param newPosition Position of the item where the current item should be moved.
     * @param delta Direction in which the user is moving (1 or -1).
     * @param isHorizontal Whether the offset should be computed along the horizontal axis (otherwise the vertical axis).
*/
protected _getItemOffsetPxls(currentPosition: ClientRect, newPosition: ClientRect, delta: 1 | -1, isHorizontal: boolean) {
let itemOffset = isHorizontal ? newPosition.left - currentPosition.left :
newPosition.top - currentPosition.top;
// Account for differences in the item width/height.
// console.log(delta)
if (delta === -1) {
itemOffset += isHorizontal ? newPosition.width - currentPosition.width :
newPosition.height - currentPosition.height;
}
return itemOffset;
}
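    /*
     * Rough worked example of the offset math above (the rects are made-up values):
     *
     *   const current = { top: 0, left: 0, width: 100, height: 80 } as ClientRect;
     *   const next = { top: 0, left: 108, width: 140, height: 80 } as ClientRect;
     *   this._getItemOffsetPxls(current, next, 1, true);   // => 108 (left edge to left edge)
     *   this._getItemOffsetPxls(current, next, -1, true);  // => 108 + (140 - 100) = 148
     *                                                      //    width difference added when delta === -1
     */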
/**
* Gets the offset in pixels by which the item that is being dragged should be moved.
* @param currentPosition Current position of the item.
* @param newPosition Position of the item where the current item should be moved.
* @param delta Direction in which the user is moving.
*/
private _getItemOffset(currentPosition: ClientRect, newPosition: ClientRect, delta: 1 | -1, pointerDelta: Point): Point {
let itemXOffset: number = 0;
let itemYOffset: number = 0;
itemXOffset = newPosition.left - currentPosition.left;//this._getItemOffsetPxls(currentPosition, newPosition, delta as any, false);
itemYOffset = newPosition.top - currentPosition.top; //this._getItemOffsetPxls(currentPosition, newPosition, delta as any, true);
if (delta === -1) {
if (pointerDelta.x === 1) {
itemXOffset += newPosition.width - currentPosition.width;
}
}
if (delta === -1) {
if (pointerDelta.y === 1) {
itemYOffset += newPosition.height - currentPosition.height;
}
}
console.log({ x: itemXOffset, y: itemYOffset });
return { x: itemXOffset, y: itemYOffset };
}
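    /*
     * With the same made-up rects, the 2D variant above returns both axes at once, and only
     * adds the size difference when the pointer is moving towards that side:
     *
     *   this._getItemOffset(current, next, -1, { x: 1, y: 0 });
     *   // => { x: 108 + (140 - 100), y: 0 }
     */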
/**
* Gets the offset in pixels by which the items that aren't being dragged should be moved.
* @param currentIndex Index of the item currently being dragged.
     * @param newIndex Index of the position the item is being moved into.
* @param delta Direction in which the user is moving.
*/
private _getSiblingOffset(currentIndex: number, newIndex: number, delta: number): Point {
const siblings = this._itemPositions;
const isHorizontal = true;
const currentPosition = siblings[currentIndex].clientRect;
const newPosition = siblings[newIndex].clientRect;
// console.log(pointerDelta);
const immediateSibling = siblings[newIndex];
let siblingXOffset = newPosition['width'] * delta;
let siblingYOffset = newPosition['height'] * delta;
// console.log(immediateSibling);
if (immediateSibling) {
const start = isHorizontal ? 'left' : 'top';
const end = isHorizontal ? 'right' : 'bottom';
// Get the spacing between the start of the current item and the end of the one immediately
// after it in the direction in which the user is dragging, or vice versa. We add it to the
// offset in order to push the element to where it will be when it's inline and is influenced
// by the `margin` of its siblings.
if (delta === -1) {
siblingXOffset -= immediateSibling.clientRect['left'] - currentPosition['right'];
siblingYOffset -= immediateSibling.clientRect['top'] - currentPosition['bottom'];
} else {
siblingXOffset += currentPosition['left'] - immediateSibling.clientRect['right'];
siblingYOffset += currentPosition['top'] - immediateSibling.clientRect['bottom'];
}
}
return { x: siblingXOffset, y: siblingYOffset };
}
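    /*
     * Sketch of the sibling offset (made-up rects with an 8px gap between the two items):
     *
     *   siblings[currentIndex].clientRect => { left: 0, right: 100, top: 0, bottom: 80, width: 100, height: 80 }
     *   siblings[newIndex].clientRect     => { left: 108, right: 248, top: 0, bottom: 80, width: 140, height: 80 }
     *   this._getSiblingOffset(currentIndex, newIndex, -1);
     *   // => { x: -140 - (108 - 100) = -148, y: 0 }  (the new position's width plus the gap, negated)
     */
    /** Writes an updated cached position back into `_itemPositions`, matched by its drag ref. */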
private _updateItemPositionCache(item: CachedGridItemPosition) {
const index = rFindIndex(currentItem => currentItem.drag === item.drag, this._itemPositions);
this._itemPositions[index] = item;
}
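    /** Index of the item inside the cached `_itemPositions` array (not the live DOM order). */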
private _getGridItemCurrentIndex(item: GridItemRef): number {
const index = rFindIndex(currentItem => currentItem.drag === item, this._itemPositions);
// console.log('_getGridItemCurrentIndex', index);
// console.log(this._itemPositions[index]);
return index
}
/** get the current siblings based on the current y pointer position */
    private _getCachedGridItemPositions(pointerY: number): CachedGridItemPosition[] {
const grid = this._grid;
const rowIndexes = toArray(this._grid.keys());
return reduce((acc, rowIndex) => {
const rowItemPositions = grid.get(rowIndex)
const { clientRect } = head(rowItemPositions);
const { top, bottom } = clientRect;
if (pointerY >= floor(top) && pointerY < floor(bottom)) {
acc = rowItemPositions;
}
return acc;
}, [], rowIndexes);
} | identifier_body |
|
grid-ref.ts | < top) rowOffset += grid.get(k).length;
})
item = { ...item, rowOffset };
grid.set(top, [item]);
} else {
let row = grid.get(top);
const rowOffset = last(row).rowOffset + 1;
item = { ...item, rowOffset };
row = addItemToGrid(item, row);
grid.set(top, row);
}
this._updateItemPositionCache(item);
}, siblings);
}
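    /**
     * Measures every active draggable and caches its ClientRect; the measurements are grouped
     * by visual row (equal `clientRect.top`) and then flattened back into a single ordered list.
     */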
protected _cacheItemPositions(): void {
const itemPositions = map(drag => {
const elementToMeasure = drag.getVisibleElement();
return { drag, offset: 0, clientRect: getMutableClientRect(elementToMeasure) };
}, this._activeDraggables)
this._itemPositions = flatten(groupWith(pathEq(['clientRect', 'top']), itemPositions));
}
/**
* Figures out the index of an item in the container.
* @param item Item whose index should be determined.
*/
public getItemIndex(item: GridItemRef): number {
if (!this._isDragging) return this._draggables.indexOf(item);
return rFindIndex(currentItem => currentItem.drag === item, this._itemPositions);
}
/**
* Sorts an item inside the container based on its position.
* @param item Item to be sorted.
     * @param pointerX Position of the user's pointer along the X axis.
     * @param pointerY Position of the user's pointer along the Y axis.
* @param pointerDelta Direction in which the pointer is moving along each axis.
*/
public _sortItem(
item: GridItemRef,
pointerX: number,
pointerY: number,
pointerDelta: Point
): void {
const insideClientRect = isPointerNearClientRect(this._clientRect, DROP_PROXIMITY_THRESHOLD, pointerX, pointerY);
// Don't sort the item if sorting is disabled or it's out of range.
if (this.sortingDisabled || !insideClientRect) return;
const siblings = this._itemPositions;
const newIndex = this._getItemIndexFromPointerPosition(item, pointerX, pointerY, pointerDelta);
if (newIndex === -1 && siblings.length > 0) return;
const currentIndex = this._getGridItemCurrentIndex(item);
if (newIndex === currentIndex) return;
const siblingAtNewPosition = siblings[newIndex];
const currentPosition = siblings[currentIndex].clientRect;
const newPosition = siblingAtNewPosition.clientRect;
const delta = currentIndex > newIndex ? 1 : -1;
// How many pixels the item's placeholder should be offset.
const itemOffset = this._getItemOffsetPx(currentPosition, newPosition, delta);
// How many pixels all the other items should be offset.
const siblingOffset = this._getSiblingOffsetPx(currentIndex, siblings, delta);
// Save the previous order of the items before moving the item to its new index.
// We use this to check whether an item has been moved as a result of the sorting.
const oldOrder = siblings.slice();
// Shuffle the array in place.
moveItemInArray(siblings, currentIndex, newIndex);
this.sorted.next({
previousIndex: currentIndex,
currentIndex: newIndex,
container: this,
item
});
forEachIndexed((sibling: CachedGridItemPosition, index: number) => {
// Don't do anything if the position hasn't changed.
if (oldOrder[index] === sibling) return;
const isDraggedItem = sibling.drag === item; | sibling.drag.getRootElement();
// Update the offset to reflect the new position.
// console.log(offset, isDraggedItem);
// sibling.gridOffset.x += offset.x;
// sibling.gridOffset.y += offset.y;
sibling.offset += offset;
// Round the transforms since some browsers will
// blur the elements, for sub-pixel transforms.
const offsetXPx = coerceCssPixelValue(Math.round(sibling.offset));
// const offsetYPx = coerceCssPixelValue(Math.round(sibling.gridOffset.y));
// console.log(offsetXPx, offsetYPx);
elementToOffset.style.transform = `translate3d(${offsetXPx}, 0, 0)`;
adjustClientRect(sibling.clientRect, 0, offset);
}, siblings);
}
// public _sortItem(
// item: GridItemRef,
// pointerX: number,
// pointerY: number,
// pointerDelta: Point
// ): void {
// const insideClientRect = isPointerNearClientRect(this._clientRect, DROP_PROXIMITY_THRESHOLD, pointerX, pointerY);
// // Don't sort the item if sorting is disabled or it's out of range.
// if (this.sortingDisabled || !insideClientRect) return;
// const siblings = this._itemPositions;
// const newIndex = this._getItemIndexFromPointerPosition(item, pointerX, pointerY, pointerDelta);
// if (newIndex === -1 && siblings.length > 0) return;
// const currentIndex = this._getGridItemCurrentIndex(item);
// console.log('currentIndex :', currentIndex)
// console.log('newIndex :', newIndex);
// const siblingAtNewPosition = siblings[newIndex];
// const currentPosition = siblings[currentIndex].clientRect;
// const newPosition = siblingAtNewPosition.clientRect;
// const delta = currentIndex > newIndex ? 1 : -1;
// // How many pixels the item's placeholder should be offset.
// const itemOffset = this._getItemOffset(currentPosition, newPosition, delta, pointerDelta);
// // How many pixels all the other items should be offset.
// const siblingOffset = this._getSiblingOffset(currentIndex, pointerY, delta, pointerDelta);
// // Save the previous order of the items before moving the item to its new index.
// // We use this to check whether an item has been moved as a result of the sorting.
// const oldOrder = siblings.slice();
// // Shuffle the array in place.
// moveItemInArray(siblings, currentIndex, newIndex);
// this.sorted.next({
// previousIndex: currentIndex,
// currentIndex: newIndex,
// container: this,
// item
// });
// this.sort(item.getVisibleElement(), currentIndex, newIndex);
// // forEachIndexed((sibling: CachedGridItemPosition, index: number) => {
// // // Don't do anything if the position hasn't changed.
// // if (oldOrder[index] === sibling) return;
// // const isDraggedItem = sibling.drag === item;
// // const offset = isDraggedItem ? itemOffset : siblingOffset;
// // const elementToOffset = isDraggedItem ? item.getPlaceholderElement() :
// // sibling.drag.getRootElement();
// // // Update the offset to reflect the new position.
// // // console.log(offset, isDraggedItem);
// // sibling.gridOffset.x += offset.x;
// // sibling.gridOffset.y += offset.y;
// // // Round the transforms since some browsers will
// // // blur the elements, for sub-pixel transforms.
// // const offsetXPx = coerceCssPixelValue(Math.round(offset.x));
// // const offsetYPx = coerceCssPixelValue(Math.round(offset.y));
// // elementToOffset.style.transform = `translate3d(${offsetXPx}, ${offsetYPx}, 0)`;
// // adjustClientRect(sibling.clientRect, offset.y, offset.x);
// // }, siblings);
// // Note that it's important that we do this after the client rects have been adjusted.
// // this._previousSwap.overlaps = isInsideClientRect(newPosition, pointerX, pointerY);
// // this._previousSwap.drag = siblingAtNewPosition.drag;
// // this._previousSwap.delta = pointerDelta;
// }
public sort(dragElement: HTMLElement, currentIndex: number, newIndex: number): void {
console.log(currentIndex);
const hoverIndex = utils.findIndex(dragElement);
const el = this.getSibling(dragElement, currentIndex, hoverIndex);
this._activeDraggables.forEach(drag => {
const insertedNode = dragElement.parentNode.insertBefore(drag.getVisibleElement(), el.node);
// this._class.addPlaceHolderClass(insertedNode as Element);
});
this.dragIndex = utils.findIndex(this._activeDraggables[0].getVisibleElement());
}
private getSibling(dropElement: any, dragIndex: number, hoverIndex: number): any | null {
if (dragIndex < hoverIndex) {
return { node: dropElement.nextSibling, originalIndex: hoverIndex + 1 };
}
return { node: dropElement, originalIndex: hoverIndex };
}
/**
* Gets the index of an item in the drop container, based on the position of the user's pointer.
* @param item Item that is being sorted.
* @param pointerX Position of the user's pointer along the X axis.
* @param pointerY Position of the user's pointer along the Y axis.
* @ | const offset = isDraggedItem ? itemOffset : siblingOffset;
const elementToOffset = isDraggedItem ? item.getPlaceholderElement() : | random_line_split |
grid-ref.ts | k < top) rowOffset += grid.get(k).length;
})
item = { ...item, rowOffset };
grid.set(top, [item]);
} else {
let row = grid.get(top);
const rowOffset = last(row).rowOffset + 1;
item = { ...item, rowOffset };
row = addItemToGrid(item, row);
grid.set(top, row);
}
this._updateItemPositionCache(item);
}, siblings);
}
protected | (): void {
const itemPositions = map(drag => {
const elementToMeasure = drag.getVisibleElement();
return { drag, offset: 0, clientRect: getMutableClientRect(elementToMeasure) };
}, this._activeDraggables)
this._itemPositions = flatten(groupWith(pathEq(['clientRect', 'top']), itemPositions));
}
/**
* Figures out the index of an item in the container.
* @param item Item whose index should be determined.
*/
public getItemIndex(item: GridItemRef): number {
if (!this._isDragging) return this._draggables.indexOf(item);
return rFindIndex(currentItem => currentItem.drag === item, this._itemPositions);
}
/**
* Sorts an item inside the container based on its position.
* @param item Item to be sorted.
* @param pointerX Position of the item along the X axis.
* @param pointerY Position of the item along the Y axis.
* @param pointerDelta Direction in which the pointer is moving along each axis.
*/
public _sortItem(
item: GridItemRef,
pointerX: number,
pointerY: number,
pointerDelta: Point
): void {
const insideClientRect = isPointerNearClientRect(this._clientRect, DROP_PROXIMITY_THRESHOLD, pointerX, pointerY);
// Don't sort the item if sorting is disabled or it's out of range.
if (this.sortingDisabled || !insideClientRect) return;
const siblings = this._itemPositions;
const newIndex = this._getItemIndexFromPointerPosition(item, pointerX, pointerY, pointerDelta);
if (newIndex === -1 && siblings.length > 0) return;
const currentIndex = this._getGridItemCurrentIndex(item);
if (newIndex === currentIndex) return;
const siblingAtNewPosition = siblings[newIndex];
const currentPosition = siblings[currentIndex].clientRect;
const newPosition = siblingAtNewPosition.clientRect;
const delta = currentIndex > newIndex ? 1 : -1;
// How many pixels the item's placeholder should be offset.
const itemOffset = this._getItemOffsetPx(currentPosition, newPosition, delta);
// How many pixels all the other items should be offset.
const siblingOffset = this._getSiblingOffsetPx(currentIndex, siblings, delta);
// Save the previous order of the items before moving the item to its new index.
// We use this to check whether an item has been moved as a result of the sorting.
const oldOrder = siblings.slice();
// Shuffle the array in place.
moveItemInArray(siblings, currentIndex, newIndex);
this.sorted.next({
previousIndex: currentIndex,
currentIndex: newIndex,
container: this,
item
});
forEachIndexed((sibling: CachedGridItemPosition, index: number) => {
// Don't do anything if the position hasn't changed.
if (oldOrder[index] === sibling) return;
const isDraggedItem = sibling.drag === item;
const offset = isDraggedItem ? itemOffset : siblingOffset;
const elementToOffset = isDraggedItem ? item.getPlaceholderElement() :
sibling.drag.getRootElement();
// Update the offset to reflect the new position.
// console.log(offset, isDraggedItem);
// sibling.gridOffset.x += offset.x;
// sibling.gridOffset.y += offset.y;
sibling.offset += offset;
// Round the transforms since some browsers will
// blur the elements, for sub-pixel transforms.
const offsetXPx = coerceCssPixelValue(Math.round(sibling.offset));
// const offsetYPx = coerceCssPixelValue(Math.round(sibling.gridOffset.y));
// console.log(offsetXPx, offsetYPx);
elementToOffset.style.transform = `translate3d(${offsetXPx}, 0, 0)`;
adjustClientRect(sibling.clientRect, 0, offset);
}, siblings);
}
// public _sortItem(
// item: GridItemRef,
// pointerX: number,
// pointerY: number,
// pointerDelta: Point
// ): void {
// const insideClientRect = isPointerNearClientRect(this._clientRect, DROP_PROXIMITY_THRESHOLD, pointerX, pointerY);
// // Don't sort the item if sorting is disabled or it's out of range.
// if (this.sortingDisabled || !insideClientRect) return;
// const siblings = this._itemPositions;
// const newIndex = this._getItemIndexFromPointerPosition(item, pointerX, pointerY, pointerDelta);
// if (newIndex === -1 && siblings.length > 0) return;
// const currentIndex = this._getGridItemCurrentIndex(item);
// console.log('currentIndex :', currentIndex)
// console.log('newIndex :', newIndex);
// const siblingAtNewPosition = siblings[newIndex];
// const currentPosition = siblings[currentIndex].clientRect;
// const newPosition = siblingAtNewPosition.clientRect;
// const delta = currentIndex > newIndex ? 1 : -1;
// // How many pixels the item's placeholder should be offset.
// const itemOffset = this._getItemOffset(currentPosition, newPosition, delta, pointerDelta);
// // How many pixels all the other items should be offset.
// const siblingOffset = this._getSiblingOffset(currentIndex, pointerY, delta, pointerDelta);
// // Save the previous order of the items before moving the item to its new index.
// // We use this to check whether an item has been moved as a result of the sorting.
// const oldOrder = siblings.slice();
// // Shuffle the array in place.
// moveItemInArray(siblings, currentIndex, newIndex);
// this.sorted.next({
// previousIndex: currentIndex,
// currentIndex: newIndex,
// container: this,
// item
// });
// this.sort(item.getVisibleElement(), currentIndex, newIndex);
// // forEachIndexed((sibling: CachedGridItemPosition, index: number) => {
// // // Don't do anything if the position hasn't changed.
// // if (oldOrder[index] === sibling) return;
// // const isDraggedItem = sibling.drag === item;
// // const offset = isDraggedItem ? itemOffset : siblingOffset;
// // const elementToOffset = isDraggedItem ? item.getPlaceholderElement() :
// // sibling.drag.getRootElement();
// // // Update the offset to reflect the new position.
// // // console.log(offset, isDraggedItem);
// // sibling.gridOffset.x += offset.x;
// // sibling.gridOffset.y += offset.y;
// // // Round the transforms since some browsers will
// // // blur the elements, for sub-pixel transforms.
// // const offsetXPx = coerceCssPixelValue(Math.round(offset.x));
// // const offsetYPx = coerceCssPixelValue(Math.round(offset.y));
// // elementToOffset.style.transform = `translate3d(${offsetXPx}, ${offsetYPx}, 0)`;
// // adjustClientRect(sibling.clientRect, offset.y, offset.x);
// // }, siblings);
// // Note that it's important that we do this after the client rects have been adjusted.
// // this._previousSwap.overlaps = isInsideClientRect(newPosition, pointerX, pointerY);
// // this._previousSwap.drag = siblingAtNewPosition.drag;
// // this._previousSwap.delta = pointerDelta;
// }
public sort(dragElement: HTMLElement, currentIndex: number, newIndex: number): void {
console.log(currentIndex);
const hoverIndex = utils.findIndex(dragElement);
const el = this.getSibling(dragElement, currentIndex, hoverIndex);
this._activeDraggables.forEach(drag => {
const insertedNode = dragElement.parentNode.insertBefore(drag.getVisibleElement(), el.node);
// this._class.addPlaceHolderClass(insertedNode as Element);
});
this.dragIndex = utils.findIndex(this._activeDraggables[0].getVisibleElement());
}
private getSibling(dropElement: any, dragIndex: number, hoverIndex: number): any | null {
if (dragIndex < hoverIndex) {
return { node: dropElement.nextSibling, originalIndex: hoverIndex + 1 };
}
return { node: dropElement, originalIndex: hoverIndex };
}
/**
* Gets the index of an item in the drop container, based on the position of the user's pointer.
* @param item Item that is being sorted.
* @param pointerX Position of the user's pointer along the X axis.
* @param pointerY Position of the user's pointer along the Y axis.
* | _cacheItemPositions | identifier_name |
main.rs | // parse toml config files
let mut specs: Vec<Spec> = vec![];
for filename in matches.opt_strs("f") {
consume_specs_toml(&filename[..], &mut specs);
}
if specs.len() == 0 {
let mut spec = Spec::new();
spec.start = Regex::new(r"\berror\b").ok();
specs.push(spec);
}
// perform
let lines = match matches.free.len() {
0 => |
1 => {
let path = path::Path::new(&matches.free[0]);
match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", matches.free[0], why.to_string()) },
Ok(ref mut f) => { read_lines(f) },
}
}
_ => { panic!("too many filename arguments ({}), expected just one", matches.free.len()) }
};
logselect(specs, lines, &mut io::stdout())
}
fn read_lines(reader: &mut io::Read) -> Vec<String>
{
let mut rv = Vec::new();
for line_res in io::BufReader::new(reader).lines() {
rv.push(line_res.unwrap());
}
return rv
}
fn logselect(specs: Vec<Spec>, lines: Vec<String>, writer: &mut io::Write)
{
let work = Work { lines : lines, specs : specs, index : sync::Mutex::new(0) };
let work = sync::Arc::new(work);
let (sender, receiver) = mpsc::channel();
let num_cpus = num_cpus::get();
let mut threads = Vec::with_capacity(num_cpus);
for _ in 0..threads.capacity() {
let sender = sender.clone();
let work = work.clone();
threads.push(thread::spawn(move|| {
loop {
let portion_size = 100;
let i = {
let mut p = work.index.lock().unwrap();
let rv = *p;
*p += portion_size as isize;
rv as usize
};
if i >= work.lines.len() {
sender.send( (-1, -1) ).unwrap();
break;
}
for line_index in i..i+portion_size {
if line_index >= work.lines.len() { break }
for spec in &work.specs {
process_spec(&spec, line_index, &work.lines, &sender);
}
}
}
}));
}
let mut selected_indexes = fixedbitset::FixedBitSet::with_capacity(work.lines.len());
let mut num_finished = 0;
while num_finished < threads.len() {
match receiver.recv().unwrap() {
(-1,-1) => { num_finished += 1 }
(a,b) => for i in a..b {
selected_indexes.set(i as usize, true);
}
}
}
// output
let mut prev_index = 0;
for index in 0..work.lines.len() {
if selected_indexes[index] {
if prev_index > 0 {
if index + 1 - prev_index > 1 {
writer.write(b"\n... ... ...\n\n").unwrap();
}
}
writer.write(work.lines[index].as_bytes()).unwrap();
writer.write(b"\n").unwrap();
prev_index = index + 1;
}
}
}
struct Work
{
lines: Vec<String>,
specs: Vec<Spec>,
index: sync::Mutex<isize>,
}
fn process_spec(spec: &Spec, line_index: usize, lines: &Vec<String>, sender: &mpsc::Sender<(isize, isize)>)
{
if let Some(ref rx) = spec.start {
if rx.is_match(&lines[line_index][..]) {
let sel_range = if spec.stop.is_some() || spec.whale.is_some() { try_select(&spec, lines, line_index as isize) } else { Some((line_index as isize,line_index as isize)) };
if let Some((a0,b0)) = sel_range {
let (a, b) = (a0 + spec.start_offset, b0 + spec.stop_offset);
// std::cmp should have this function
fn clamp<T>(a: T, x: T, b: T) -> T where T: Ord { std::cmp::min(std::cmp::max(a, x), b) }
let last_index = (lines.len() - 1) as isize;
let (a, b) = (clamp(0, a, last_index), clamp(0, b, last_index));
// if after applying offsets the range remains nonempty
if a0 <= b0 {
sender.send( (a, b+1) ).unwrap()
} else {
sender.send( (b, a+1) ).unwrap()
}
}
}
}
}
fn consume_specs_toml(filename: &str, specs: &mut Vec<Spec>)
{
let path = path::Path::new(filename);
let mut file = match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", filename, why.to_string()) }
Ok(f) => f
};
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
let table = match content.parse::<toml::Value>() {
Ok(toml::Value::Table(t)) => { t }
Ok(_) => { panic!("parse error in {}: root value is not a table", filename) }
Err(toml_err) => { panic!("parse error in {}: {}", filename, toml_err) }
};
consume_specs_toml_table(&table, specs);
}
#[derive(Clone)]
struct Spec
{
disable: bool,
start: Option<Regex>,
start_offset: isize,
stop: Option<Regex>,
stop_offset: isize,
whale: Option<Regex>,
backward: bool,
limit: isize,
}
impl Spec
{
fn new() -> Self
{
Spec { disable: false, start: None, start_offset: 0, stop: None, stop_offset: 0, whale: None, backward: false, limit: 1000 }
}
}
fn consume_specs_toml_table(table: &toml::value::Table, specs: &mut Vec<Spec>)
{
use toml::Value::*;
let mut spec = Spec::new();
for (key, value) in table {
match &key[..] {
"disable" => {
match *value {
Boolean(x) => { spec.disable = x }
_ => { panic!("`disable` key must be boolean") }
}
}
"start" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.start = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`start` key must be regex string") }
}
}
"start_offset" => { match *value {
Integer(ofs) => { spec.start_offset = ofs as isize; }
_ => { panic!("`start_offset` must be integer") }
} }
"stop" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.stop = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`stop` key must be regex string") }
}
}
"stop_offset" => { match *value {
Integer(ofs) => { spec.stop_offset = ofs as isize; }
_ => { panic!("`stop_offset` must be integer") }
} }
"while" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.whale = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`while` key must be regex string") }
}
}
"direction" => {
match *value {
String(ref s) => { match &s[..] {
"forward" | "fwd" | "down" => { spec.backward = false }
"backward" | "backwards" | "back" | "up" => { spec.backward = true }
ss => { panic!("`direction` value '{}' unrecognized (must be 'forward' or 'backward')", ss) }
} }
_ => { panic!("`direction` must be a string") }
}
}
"limit" => { match *value {
Integer(lim) if lim > 0 => { spec.limit = lim as isize; }
_ => { panic!("`limit` must be a positive integer") }
} }
_ => { match *value {
Table(ref t) => { consume_specs_toml_table(&t, specs) }
_ => { panic!("unrecognized key: {}", key) }
} }
}
}
if !spec.disable && spec.start.is_some() {
specs | { read_lines(&mut io::stdin()) } | conditional_block |
main.rs | // parse toml config files
let mut specs: Vec<Spec> = vec![];
for filename in matches.opt_strs("f") {
consume_specs_toml(&filename[..], &mut specs);
}
if specs.len() == 0 {
let mut spec = Spec::new();
spec.start = Regex::new(r"\berror\b").ok();
specs.push(spec);
}
// perform
let lines = match matches.free.len() {
0 => { read_lines(&mut io::stdin()) }
1 => {
let path = path::Path::new(&matches.free[0]);
match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", matches.free[0], why.to_string()) },
Ok(ref mut f) => { read_lines(f) },
}
}
_ => { panic!("too many filename arguments ({}), expected just one", matches.free.len()) }
};
logselect(specs, lines, &mut io::stdout())
}
fn read_lines(reader: &mut io::Read) -> Vec<String>
{
let mut rv = Vec::new();
for line_res in io::BufReader::new(reader).lines() {
rv.push(line_res.unwrap());
}
return rv
}
fn logselect(specs: Vec<Spec>, lines: Vec<String>, writer: &mut io::Write)
{
let work = Work { lines : lines, specs : specs, index : sync::Mutex::new(0) };
let work = sync::Arc::new(work);
let (sender, receiver) = mpsc::channel();
let num_cpus = num_cpus::get();
let mut threads = Vec::with_capacity(num_cpus);
for _ in 0..threads.capacity() {
let sender = sender.clone();
let work = work.clone();
threads.push(thread::spawn(move|| {
loop {
let portion_size = 100;
let i = {
let mut p = work.index.lock().unwrap();
let rv = *p;
*p += portion_size as isize;
rv as usize
};
if i >= work.lines.len() {
sender.send( (-1, -1) ).unwrap();
break;
}
for line_index in i..i+portion_size {
if line_index >= work.lines.len() { break }
for spec in &work.specs {
process_spec(&spec, line_index, &work.lines, &sender);
}
}
}
}));
}
let mut selected_indexes = fixedbitset::FixedBitSet::with_capacity(work.lines.len());
let mut num_finished = 0;
while num_finished < threads.len() {
match receiver.recv().unwrap() {
(-1,-1) => { num_finished += 1 }
(a,b) => for i in a..b {
selected_indexes.set(i as usize, true);
}
}
}
// output
let mut prev_index = 0;
for index in 0..work.lines.len() {
if selected_indexes[index] {
if prev_index > 0 {
if index + 1 - prev_index > 1 {
writer.write(b"\n... ... ...\n\n").unwrap();
}
}
writer.write(work.lines[index].as_bytes()).unwrap();
writer.write(b"\n").unwrap();
prev_index = index + 1;
}
}
}
struct Work
{
lines: Vec<String>,
specs: Vec<Spec>,
index: sync::Mutex<isize>,
}
fn | (spec: &Spec, line_index: usize, lines: &Vec<String>, sender: &mpsc::Sender<(isize, isize)>)
{
if let Some(ref rx) = spec.start {
if rx.is_match(&lines[line_index][..]) {
let sel_range = if spec.stop.is_some() || spec.whale.is_some() { try_select(&spec, lines, line_index as isize) } else { Some((line_index as isize,line_index as isize)) };
if let Some((a0,b0)) = sel_range {
let (a, b) = (a0 + spec.start_offset, b0 + spec.stop_offset);
// std::cmp should have this function
fn clamp<T>(a: T, x: T, b: T) -> T where T: Ord { std::cmp::min(std::cmp::max(a, x), b) }
let last_index = (lines.len() - 1) as isize;
let (a, b) = (clamp(0, a, last_index), clamp(0, b, last_index));
// if after applying offsets the range remains nonempty
if a0 <= b0 {
sender.send( (a, b+1) ).unwrap()
} else {
sender.send( (b, a+1) ).unwrap()
}
}
}
}
}
fn consume_specs_toml(filename: &str, specs: &mut Vec<Spec>)
{
let path = path::Path::new(filename);
let mut file = match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", filename, why.to_string()) }
Ok(f) => f
};
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
let table = match content.parse::<toml::Value>() {
Ok(toml::Value::Table(t)) => { t }
Ok(_) => { panic!("parse error in {}: root value is not a table", filename) }
Err(toml_err) => { panic!("parse error in {}: {}", filename, toml_err) }
};
consume_specs_toml_table(&table, specs);
}
#[derive(Clone)]
struct Spec
{
disable: bool,
start: Option<Regex>,
start_offset: isize,
stop: Option<Regex>,
stop_offset: isize,
whale: Option<Regex>,
backward: bool,
limit: isize,
}
impl Spec
{
fn new() -> Self
{
Spec { disable: false, start: None, start_offset: 0, stop: None, stop_offset: 0, whale: None, backward: false, limit: 1000 }
}
}
fn consume_specs_toml_table(table: &toml::value::Table, specs: &mut Vec<Spec>)
{
use toml::Value::*;
let mut spec = Spec::new();
for (key, value) in table {
match &key[..] {
"disable" => {
match *value {
Boolean(x) => { spec.disable = x }
_ => { panic!("`disable` key must be boolean") }
}
}
"start" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.start = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`start` key must be regex string") }
}
}
"start_offset" => { match *value {
Integer(ofs) => { spec.start_offset = ofs as isize; }
_ => { panic!("`start_offset` must be integer") }
} }
"stop" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.stop = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`stop` key must be regex string") }
}
}
"stop_offset" => { match *value {
Integer(ofs) => { spec.stop_offset = ofs as isize; }
_ => { panic!("`stop_offset` must be integer") }
} }
"while" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.whale = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`while` key must be regex string") }
}
}
"direction" => {
match *value {
String(ref s) => { match &s[..] {
"forward" | "fwd" | "down" => { spec.backward = false }
"backward" | "backwards" | "back" | "up" => { spec.backward = true }
ss => { panic!("`direction` value '{}' unrecognized (must be 'forward' or 'backward')", ss) }
} }
_ => { panic!("`direction` must be a string") }
}
}
"limit" => { match *value {
Integer(lim) if lim > 0 => { spec.limit = lim as isize; }
_ => { panic!("`limit` must be a positive integer") }
} }
_ => { match *value {
Table(ref t) => { consume_specs_toml_table(&t, specs) }
_ => { panic!("unrecognized key: {}", key) }
} }
}
}
if !spec.disable && spec.start.is_some() {
specs.push | process_spec | identifier_name |
main.rs | // parse toml config files
let mut specs: Vec<Spec> = vec![];
for filename in matches.opt_strs("f") {
consume_specs_toml(&filename[..], &mut specs);
}
if specs.len() == 0 {
let mut spec = Spec::new();
spec.start = Regex::new(r"\berror\b").ok();
specs.push(spec);
}
// perform
let lines = match matches.free.len() {
0 => { read_lines(&mut io::stdin()) }
1 => {
let path = path::Path::new(&matches.free[0]);
match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", matches.free[0], why.to_string()) },
Ok(ref mut f) => { read_lines(f) },
}
}
_ => { panic!("too many filename arguments ({}), expected just one", matches.free.len()) }
};
logselect(specs, lines, &mut io::stdout())
}
fn read_lines(reader: &mut io::Read) -> Vec<String>
{
let mut rv = Vec::new();
for line_res in io::BufReader::new(reader).lines() {
rv.push(line_res.unwrap());
}
return rv
}
fn logselect(specs: Vec<Spec>, lines: Vec<String>, writer: &mut io::Write)
{
let work = Work { lines : lines, specs : specs, index : sync::Mutex::new(0) };
let work = sync::Arc::new(work);
let (sender, receiver) = mpsc::channel();
let num_cpus = num_cpus::get();
let mut threads = Vec::with_capacity(num_cpus);
for _ in 0..threads.capacity() {
let sender = sender.clone();
let work = work.clone();
threads.push(thread::spawn(move|| {
loop {
let portion_size = 100;
let i = {
let mut p = work.index.lock().unwrap();
let rv = *p;
*p += portion_size as isize;
rv as usize
};
if i >= work.lines.len() {
sender.send( (-1, -1) ).unwrap();
break;
}
for line_index in i..i+portion_size {
if line_index >= work.lines.len() { break }
for spec in &work.specs {
process_spec(&spec, line_index, &work.lines, &sender);
}
}
}
}));
}
let mut selected_indexes = fixedbitset::FixedBitSet::with_capacity(work.lines.len());
let mut num_finished = 0;
while num_finished < threads.len() {
match receiver.recv().unwrap() {
(-1,-1) => { num_finished += 1 }
(a,b) => for i in a..b {
selected_indexes.set(i as usize, true);
}
}
}
// output
let mut prev_index = 0;
for index in 0..work.lines.len() {
if selected_indexes[index] {
if prev_index > 0 {
if index + 1 - prev_index > 1 {
writer.write(b"\n... ... ...\n\n").unwrap();
}
}
writer.write(work.lines[index].as_bytes()).unwrap();
writer.write(b"\n").unwrap();
prev_index = index + 1;
}
}
}
struct Work
{
lines: Vec<String>,
specs: Vec<Spec>,
index: sync::Mutex<isize>,
}
fn process_spec(spec: &Spec, line_index: usize, lines: &Vec<String>, sender: &mpsc::Sender<(isize, isize)>)
{
if let Some(ref rx) = spec.start {
if rx.is_match(&lines[line_index][..]) {
let sel_range = if spec.stop.is_some() || spec.whale.is_some() { try_select(&spec, lines, line_index as isize) } else { Some((line_index as isize,line_index as isize)) };
if let Some((a0,b0)) = sel_range {
let (a, b) = (a0 + spec.start_offset, b0 + spec.stop_offset);
// std::cmp should have this function
fn clamp<T>(a: T, x: T, b: T) -> T where T: Ord { std::cmp::min(std::cmp::max(a, x), b) }
let last_index = (lines.len() - 1) as isize;
let (a, b) = (clamp(0, a, last_index), clamp(0, b, last_index));
// if after applying offsets the range remains nonempty
if a0 <= b0 {
sender.send( (a, b+1) ).unwrap()
} else {
sender.send( (b, a+1) ).unwrap()
}
}
}
}
}
fn consume_specs_toml(filename: &str, specs: &mut Vec<Spec>)
|
#[derive(Clone)]
struct Spec
{
disable: bool,
start: Option<Regex>,
start_offset: isize,
stop: Option<Regex>,
stop_offset: isize,
whale: Option<Regex>,
backward: bool,
limit: isize,
}
impl Spec
{
fn new() -> Self
{
Spec { disable: false, start: None, start_offset: 0, stop: None, stop_offset: 0, whale: None, backward: false, limit: 1000 }
}
}
fn consume_specs_toml_table(table: &toml::value::Table, specs: &mut Vec<Spec>)
{
use toml::Value::*;
let mut spec = Spec::new();
for (key, value) in table {
match &key[..] {
"disable" => {
match *value {
Boolean(x) => { spec.disable = x }
_ => { panic!("`disable` key must be boolean") }
}
}
"start" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.start = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`start` key must be regex string") }
}
}
"start_offset" => { match *value {
Integer(ofs) => { spec.start_offset = ofs as isize; }
_ => { panic!("`start_offset` must be integer") }
} }
"stop" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.stop = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`stop` key must be regex string") }
}
}
"stop_offset" => { match *value {
Integer(ofs) => { spec.stop_offset = ofs as isize; }
_ => { panic!("`stop_offset` must be integer") }
} }
"while" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.whale = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`while` key must be regex string") }
}
}
"direction" => {
match *value {
String(ref s) => { match &s[..] {
"forward" | "fwd" | "down" => { spec.backward = false }
"backward" | "backwards" | "back" | "up" => { spec.backward = true }
ss => { panic!("`direction` value '{}' unrecognized (must be 'forward' or 'backward')", ss) }
} }
_ => { panic!("`direction` must be a string") }
}
}
"limit" => { match *value {
Integer(lim) if lim > 0 => { spec.limit = lim as isize; }
_ => { panic!("`limit` must be a positive integer") }
} }
_ => { match *value {
Table(ref t) => { consume_specs_toml_table(&t, specs) }
_ => { panic!("unrecognized key: {}", key) }
} }
}
}
if !spec.disable && spec.start.is_some() {
specs.push | {
let path = path::Path::new(filename);
let mut file = match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", filename, why.to_string()) }
Ok(f) => f
};
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
let table = match content.parse::<toml::Value>() {
Ok(toml::Value::Table(t)) => { t }
Ok(_) => { panic!("parse error in {}: root value is not a table", filename) }
Err(toml_err) => { panic!("parse error in {}: {}", filename, toml_err) }
};
consume_specs_toml_table(&table, specs);
} | identifier_body |
main.rs | in a..b {
selected_indexes.set(i as usize, true);
}
}
}
// output
let mut prev_index = 0;
for index in 0..work.lines.len() {
if selected_indexes[index] {
if prev_index > 0 {
if index + 1 - prev_index > 1 {
writer.write(b"\n... ... ...\n\n").unwrap();
}
}
writer.write(work.lines[index].as_bytes()).unwrap();
writer.write(b"\n").unwrap();
prev_index = index + 1;
}
}
}
struct Work
{
lines: Vec<String>,
specs: Vec<Spec>,
index: sync::Mutex<isize>,
}
fn process_spec(spec: &Spec, line_index: usize, lines: &Vec<String>, sender: &mpsc::Sender<(isize, isize)>)
{
if let Some(ref rx) = spec.start {
if rx.is_match(&lines[line_index][..]) {
let sel_range = if spec.stop.is_some() || spec.whale.is_some() { try_select(&spec, lines, line_index as isize) } else { Some((line_index as isize,line_index as isize)) };
if let Some((a0,b0)) = sel_range {
let (a, b) = (a0 + spec.start_offset, b0 + spec.stop_offset);
// std::cmp should have this function
fn clamp<T>(a: T, x: T, b: T) -> T where T: Ord { std::cmp::min(std::cmp::max(a, x), b) }
let last_index = (lines.len() - 1) as isize;
let (a, b) = (clamp(0, a, last_index), clamp(0, b, last_index));
// if after applying offsets the range remains nonempty
if a0 <= b0 {
sender.send( (a, b+1) ).unwrap()
} else {
sender.send( (b, a+1) ).unwrap()
}
}
}
}
}
fn consume_specs_toml(filename: &str, specs: &mut Vec<Spec>)
{
let path = path::Path::new(filename);
let mut file = match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", filename, why.to_string()) }
Ok(f) => f
};
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
let table = match content.parse::<toml::Value>() {
Ok(toml::Value::Table(t)) => { t }
Ok(_) => { panic!("parse error in {}: root value is not a table", filename) }
Err(toml_err) => { panic!("parse error in {}: {}", filename, toml_err) }
};
consume_specs_toml_table(&table, specs);
}
#[derive(Clone)]
struct Spec
{
disable: bool,
start: Option<Regex>,
start_offset: isize,
stop: Option<Regex>,
stop_offset: isize,
whale: Option<Regex>,
backward: bool,
limit: isize,
}
impl Spec
{
fn new() -> Self
{
Spec { disable: false, start: None, start_offset: 0, stop: None, stop_offset: 0, whale: None, backward: false, limit: 1000 }
}
}
fn consume_specs_toml_table(table: &toml::value::Table, specs: &mut Vec<Spec>)
{
use toml::Value::*;
let mut spec = Spec::new();
for (key, value) in table {
match &key[..] {
"disable" => {
match *value {
Boolean(x) => { spec.disable = x }
_ => { panic!("`disable` key must be boolean") }
}
}
"start" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.start = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`start` key must be regex string") }
}
}
"start_offset" => { match *value {
Integer(ofs) => { spec.start_offset = ofs as isize; }
_ => { panic!("`start_offset` must be integer") }
} }
"stop" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.stop = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`stop` key must be regex string") }
}
}
"stop_offset" => { match *value {
Integer(ofs) => { spec.stop_offset = ofs as isize; }
_ => { panic!("`stop_offset` must be integer") }
} }
"while" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.whale = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`while` key must be regex string") }
}
}
"direction" => {
match *value {
String(ref s) => { match &s[..] {
"forward" | "fwd" | "down" => { spec.backward = false }
"backward" | "backwards" | "back" | "up" => { spec.backward = true }
ss => { panic!("`direction` value '{}' unrecognized (must be 'forward' or 'backward')", ss) }
} }
_ => { panic!("`direction` must be a string") }
}
}
"limit" => { match *value {
Integer(lim) if lim > 0 => { spec.limit = lim as isize; }
_ => { panic!("`limit` must be a positive integer") }
} }
_ => { match *value {
Table(ref t) => { consume_specs_toml_table(&t, specs) }
_ => { panic!("unrecognized key: {}", key) }
} }
}
}
if !spec.disable && spec.start.is_some() {
specs.push(spec);
}
}
fn try_select(spec: &Spec, lines: &Vec<String>, index: isize) -> Option<(isize, isize)>
{
let step = if spec.backward { -1 } else { 1 };
let mut cursor = index + step;
while (cursor >= 0) && (cursor < lines.len() as isize) && (cursor - index).abs() <= spec.limit {
match spec.stop {
Some(ref rx) if rx.is_match(&lines[cursor as usize][..]) => { return Some((index, cursor)) }
_ => {}
};
match spec.whale {
Some(ref rx) if !rx.is_match(&lines[cursor as usize][..]) => { return Some((index, cursor-step)) }
_ => {}
};
cursor += step;
}
match spec.whale {
Some(_) => { return Some((index, cursor-step)) }
_ => { return None }
};
}
#[test]
fn test_all()
{
let sample_lines = read_lines(&mut fs::File::open(&path::Path::new("tests/data/sample.txt")).unwrap());
let mut failed_files = Vec::<String>::new();
println!(""); // cargo test prepends tab to the first line, but not the rest
for entry in std::fs::read_dir(&path::Path::new("tests/data")).unwrap() {
let entry_path = entry.unwrap().path();
if entry_path.extension().unwrap().to_str().unwrap() == "toml" {
let mut specs: Vec<Spec> = vec![];
let toml_path_s = entry_path.clone().into_os_string().into_string().unwrap();
print!("testing {} ... ", toml_path_s);
let _ = io::stdout().flush();
consume_specs_toml(&toml_path_s[..], &mut specs);
let expected_content_path = entry_path.with_extension("txt");
let expected_content_path_str = expected_content_path.clone().into_os_string().into_string().unwrap();
let mut expected_content = String::new();
match fs::File::open(&expected_content_path) {
Err(err) => { panic!("{}: can not open file {}: {}", toml_path_s, expected_content_path_str, err); }
Ok(ref mut f) => { f.read_to_string(&mut expected_content).unwrap(); }
};
let mut output = Vec::<u8>::new();
logselect(specs.clone(), sample_lines.clone(), &mut output);
if expected_content.as_bytes() == &output[..] {
println!("+"); | } else {
failed_files.push(toml_path_s);
println!("fail\n\t{} spec(s) recognized\n--- expected ---\n{}\n--- actual ---", specs.len(), &expected_content[..]);
println!("{}", std::str::from_utf8(&output).unwrap());
println!("--- end ---"); | random_line_split |
|
plugin.py | else:
tempResult = result[count]
tempResult[key] = parseResult({key: ".".join(splitted[:i])}, tempEntry, False)
count += 1
else:
if(index.isdigit()):
temp = temp[int(index)]
else:
temp = temp[index]
return result
def executeJson(command):
xbmc.log("Command: " + command)
temp = json.loads(xbmc.executeJSONRPC(command))
if('result' in temp):
return temp['result']
return None
def passToSkin(listItems):
global handle
global params
xbmc.log('passToSkin called')
xbmc.log('handle ' + str(handle))
for item in listItems:
xbmc.log(str(item.getLabel()))
result = xbmcplugin.addDirectoryItems(handle=handle,
items=[(i.getProperty("path"), i, False) for i in listItems],
totalItems=len(listItems))
xbmc.log("adding dir was " + str(result))
xbmcplugin.endOfDirectory(handle)
#if('id' in params):
# windowId = 12901
# if('windowid' in params):
# windowId = int(Params['windowid'])
# xbmcgui.Window(windowId).getControl(int(params['id'])).setEnabled(True)
xbmcplugin.setResolvedUrl(handle=handle, succeeded=True, listitem=xbmcgui.ListItem())
return
def getDirectorFolder(director):
command = '{"jsonrpc": "2.0","method": "Files.GetDirectory","params": {"directory": "videodb://movies/directors/"}, "id": 1}'
jsonResult = executeJson(command)['files']
for entry in jsonResult:
if entry['label'] == director:
return entry['file']
def getSeasonForEpisode(mediaInfo):
command = '{"jsonrpc": "2.0","method": "Player.GetActivePlayers", "id": 1}'
#{"id":1,"jsonrpc":"2.0","result":[{"playerid":1,"playertype":"internal","type":"video"}]}
jsonResult = executeJson(command)
playerId = jsonResult[0]['playerid']
command = '{"jsonrpc": "2.0","method": "Player.GetItem","params": {"playerid": '+str(playerId)+', "properties":["tvshowid","season"]}, "id": 1}'
jsonResult = executeJson(command)['item']
xbmc.log(json.dumps(jsonResult))
global params
depth = params['depth']
xbmc.log("depth" + depth)
if(depth == "0"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'])+ '/'
if(depth == "1"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'] + 1)+ '/'
if(depth == "2"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'] - 1)+ '/'
def getSurroundingMovies(mediaInfo):
command = '{"jsonrpc": "2.0","method": "Player.GetActivePlayers", "id": 1}'
#{"id":1,"jsonrpc":"2.0","result":[{"playerid":1,"playertype":"internal","type":"video"}]}
jsonResult = executeJson(command)
playerId = jsonResult[0]['playerid']
command = '{"jsonrpc": "2.0","method": "Player.GetItem","params": {"playerid": '+str(playerId)+', "properties":["director","setid"]}, "id": 1}'
jsonResult = executeJson(command)['item']
if jsonResult['setid'] != 0:
return 'videodb://movies/sets/'+str(jsonResult['setid'])
return getDirectorFolder(jsonResult['director'][0])
def createListItemsFromDescriptor(listItemDescriptors):
result = []
for listItemDescriptor in listItemDescriptors:
art = {}
if("thumb" in listItemDescriptor):
art['thumb'] = listItemDescriptor['thumb']
elif("icon" in listItemDescriptor):
|
elif("poster" in listItemDescriptor):
art['poster'] = listItemDescriptor['poster']
result.append(createListItem(listItemDescriptors['label'], listItemDescriptors['path'], listItemDescriptors['thumb'], listItemDescriptors['label']))
return result
def doGet(url, headers):
req = Request(url, headers=headers)
response = urlopen(req)
result = response.read()
xbmc.log(result)
return json.loads(result)
def findProviderForPlugin(mediaInfo):
path = mediaInfo['pluginpath']
providerResult = {}
providerResult['type'] = None
providerResult['path'] = []
global providersConfig
global params
toSearch = providersConfig[params['depth']]
xbmc.log("finding plugin")
xbmc.log(str(providersConfig))
for key, values in toSearch.items():
xbmc.log(path)
xbmc.log(key)
xbmc.log(str(values))
match = re.search(key, path)
if(match):
xbmc.log("match")
result = []
for providers in values:
for provider in providers:
providerType = provider['type']
if(providerType == 'plugin'):
# is always last to be called
providerResult['type'] = "path"
path = eval(provider['path'])
providerResult['path'].append(path)
elif(providerType == 'getlistitm'):
# is always last to be called
providerResult['type'] = "items"
url = eval(provider['path'])
headers = {}
resultMapping = provider['result']
if 'headers' in provider:
for headerName, headerValue in provider['headers'].items():
headers[headerName] = eval(headerValue)
rResult = doGet(url, headers)
listItemDescriptors = parseResult(resultMapping, rResult, True)
providerResult['path'].append(createListItemFromDescriptor(listItemDescriptors))
elif(providerType == 'get'):
url = eval(provider['path'])
headers = {}
resultMapping = provider['result']
if 'headers' in provider:
for headerName, headerValue in provider['headers'].items():
headers[headerName] = eval(headerValue)
xbmc.log("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
xbmc.log(url)
xbmc.log(str(headers))
rResult = doGet(url, headers)
xbmc.log(str(rResult))
result.append(parseResult(resultMapping, rResult, False))
return providerResult
xbmc.log("loop ended")
providerResult['type'] = "items"
providerResult['path'] = getLastDir()
xbmc.log("last dir used " + str(providerResult))
return providerResult
def getRelatedProvider(mediaInfo):
provider = {}
if (mediaInfo['type'] == "video"):
provider['type'] = "path"
provider['path'] = [mediaInfo['folderpath']]
elif (mediaInfo['type'] == "episode"):
provider['type'] = "path"
provider['path'] = [getSeasonForEpisode(mediaInfo)]
elif (mediaInfo['type'] == "movie"):
provider['type'] = "path"
provider['path'] = [getSurroundingMovies(mediaInfo)]
elif (mediaInfo['type'] == "plugin"):
provider = findProviderForPlugin(mediaInfo)
return provider
def createListItem(name, art, path, label, focused = False):
global count
if(focused):
xbmc.log(":::::::::::::::::::::::::::::::::")
li = xbmcgui.ListItem(name)
li.setArt(art)
li.setLabel(label)
li.setProperty("isPlayable", "false")
li.setProperty("index", str(count))
li.setPath(path=buildPath(art, path, label))
li.setProperty('path', path)
li.select(focused)
count += 1
return li
def buildPath(art, path, label):
return "plugin://plugin.program.relatedmedia?play=1&art=" + urllib.quote(json.dumps(art)) + "&path=" + urllib.quote(path) + "&label=" + urllib.quote(label.encode('utf8'))
def getInternalRelated(provider):
global params
length = -1
jsonResult = None
if('length' in params):
length = params['length']
for path in provider['path']:
command = '{"jsonrpc": "2.0","method": "Files.GetDirectory","params": {"directory": "'+path+'", "properties":["art"]'
#if(length != -1):
# command += ',"limits": {"end":"' + str(length)+'"}'
command += '}, "id": 1}'
jsonResult = executeJson(command)
if(jsonResult == None or not 'files' in jsonResult):
continue
break
if(jsonResult == None or not 'files' in jsonResult):
return None
jsonResult = jsonResult['files']
listItems = []
for entry in jsonResult:
listItems.append(createListItem(entry['label'], entry['art'], entry['file'], entry['label']))
return listItems
def getRelatedItems(mediaInfo):
provider = getRelatedProvider(mediaInfo)
if(provider[' | art['icon'] = listItemDescriptor['icon'] | conditional_block |
plugin.py | (True)
xbmcplugin.setResolvedUrl(handle=handle, succeeded=True, listitem=xbmcgui.ListItem())
return
def getDirectorFolder(director):
command = '{"jsonrpc": "2.0","method": "Files.GetDirectory","params": {"directory": "videodb://movies/directors/"}, "id": 1}'
jsonResult = executeJson(command)['files']
for entry in jsonResult:
if entry['label'] == director:
return entry['file']
def getSeasonForEpisode(mediaInfo):
command = '{"jsonrpc": "2.0","method": "Player.GetActivePlayers", "id": 1}'
#{"id":1,"jsonrpc":"2.0","result":[{"playerid":1,"playertype":"internal","type":"video"}]}
jsonResult = executeJson(command)
playerId = jsonResult[0]['playerid']
command = '{"jsonrpc": "2.0","method": "Player.GetItem","params": {"playerid": '+str(playerId)+', "properties":["tvshowid","season"]}, "id": 1}'
jsonResult = executeJson(command)['item']
xbmc.log(json.dumps(jsonResult))
global params
depth = params['depth']
xbmc.log("depth" + depth)
if(depth == "0"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'])+ '/'
if(depth == "1"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'] + 1)+ '/'
if(depth == "2"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'] - 1)+ '/'
def getSurroundingMovies(mediaInfo):
command = '{"jsonrpc": "2.0","method": "Player.GetActivePlayers", "id": 1}'
#{"id":1,"jsonrpc":"2.0","result":[{"playerid":1,"playertype":"internal","type":"video"}]}
jsonResult = executeJson(command)
playerId = jsonResult[0]['playerid']
command = '{"jsonrpc": "2.0","method": "Player.GetItem","params": {"playerid": '+str(playerId)+', "properties":["director","setid"]}, "id": 1}'
jsonResult = executeJson(command)['item']
if jsonResult['setid'] != 0:
return 'videodb://movies/sets/'+str(jsonResult['setid'])
return getDirectorFolder(jsonResult['director'][0])
def createListItemsFromDescriptor(listItemDescriptors):
result = []
for listItemDescriptor in listItemDescriptors:
art = {}
if("thumb" in listItemDescriptor):
art['thumb'] = listItemDescriptor['thumb']
elif("icon" in listItemDescriptor):
art['icon'] = listItemDescriptor['icon']
elif("poster" in listItemDescriptor):
art['poster'] = listItemDescriptor['poster']
result.append(createListItem(listItemDescriptors['label'], listItemDescriptors['path'], listItemDescriptors['thumb'], listItemDescriptors['label']))
return result
def doGet(url, headers):
req = Request(url, headers=headers)
response = urlopen(req)
result = response.read()
xbmc.log(result)
return json.loads(result)
def findProviderForPlugin(mediaInfo):
path = mediaInfo['pluginpath']
providerResult = {}
providerResult['type'] = None
providerResult['path'] = []
global providersConfig
global params
toSearch = providersConfig[params['depth']]
xbmc.log("finding plugin")
xbmc.log(str(providersConfig))
for key, values in toSearch.items():
xbmc.log(path)
xbmc.log(key)
xbmc.log(str(values))
match = re.search(key, path)
if(match):
xbmc.log("match")
result = []
for providers in values:
for provider in providers:
providerType = provider['type']
if(providerType == 'plugin'):
# is always last to be called
providerResult['type'] = "path"
path = eval(provider['path'])
providerResult['path'].append(path)
elif(providerType == 'getlistitm'):
# is always last to be called
providerResult['type'] = "items"
url = eval(provider['path'])
headers = {}
resultMapping = provider['result']
if 'headers' in provider:
for headerName, headerValue in provider['headers'].items():
headers[headerName] = eval(headerValue)
rResult = doGet(url, headers)
listItemDescriptors = parseResult(resultMapping, rResult, True)
providerResult['path'].append(createListItemFromDescriptor(listItemDescriptors))
elif(providerType == 'get'):
url = eval(provider['path'])
headers = {}
resultMapping = provider['result']
if 'headers' in provider:
for headerName, headerValue in provider['headers'].items():
headers[headerName] = eval(headerValue)
xbmc.log("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
xbmc.log(url)
xbmc.log(str(headers))
rResult = doGet(url, headers)
xbmc.log(str(rResult))
result.append(parseResult(resultMapping, rResult, False))
return providerResult
xbmc.log("loop ended")
providerResult['type'] = "items"
providerResult['path'] = getLastDir()
xbmc.log("last dir used " + str(providerResult))
return providerResult
def getRelatedProvider(mediaInfo):
provider = {}
if (mediaInfo['type'] == "video"):
provider['type'] = "path"
provider['path'] = [mediaInfo['folderpath']]
elif (mediaInfo['type'] == "episode"):
provider['type'] = "path"
provider['path'] = [getSeasonForEpisode(mediaInfo)]
elif (mediaInfo['type'] == "movie"):
provider['type'] = "path"
provider['path'] = [getSurroundingMovies(mediaInfo)]
elif (mediaInfo['type'] == "plugin"):
provider = findProviderForPlugin(mediaInfo)
return provider
def createListItem(name, art, path, label, focused = False):
global count
if(focused):
xbmc.log(":::::::::::::::::::::::::::::::::")
li = xbmcgui.ListItem(name)
li.setArt(art)
li.setLabel(label)
li.setProperty("isPlayable", "false")
li.setProperty("index", str(count))
li.setPath(path=buildPath(art, path, label))
li.setProperty('path', path)
li.select(focused)
count += 1
return li
def buildPath(art, path, label):
return "plugin://plugin.program.relatedmedia?play=1&art=" + urllib.quote(json.dumps(art)) + "&path=" + urllib.quote(path) + "&label=" + urllib.quote(label.encode('utf8'))
def getInternalRelated(provider):
global params
length = -1
jsonResult = None
if('length' in params):
length = params['length']
for path in provider['path']:
command = '{"jsonrpc": "2.0","method": "Files.GetDirectory","params": {"directory": "'+path+'", "properties":["art"]'
#if(length != -1):
# command += ',"limits": {"end":"' + str(length)+'"}'
command += '}, "id": 1}'
jsonResult = executeJson(command)
if(jsonResult == None or not 'files' in jsonResult):
continue
break
if(jsonResult == None or not 'files' in jsonResult):
return None
jsonResult = jsonResult['files']
listItems = []
for entry in jsonResult:
listItems.append(createListItem(entry['label'], entry['art'], entry['file'], entry['label']))
return listItems
def getRelatedItems(mediaInfo):
provider = getRelatedProvider(mediaInfo)
if(provider['type'] == "path"):
return getInternalRelated(provider)
elif(provider['type'] == "items"):
return provider['path']
def getRunningmediaInfoInfo():
item = {}
global player
if(player.isPlayingVideo()):
mediaType = xbmc.Player().getVideoInfoTag().getMediaType()
if("episode" == mediaType or "movie" == mediaType):
item['type'] = mediaType
item['dbid'] = xbmc.getInfoLabel('VideoPlayer.DBID')
else:
if(len(xbmc.getInfoLabel('Player.Filenameandpath')) > 9 and xbmc.getInfoLabel('Player.Filenameandpath')[:9] == "plugin://"):
item['type'] = "plugin"
item['pluginpath'] = xbmc.getInfoLabel('Player.Filenameandpath')
else:
item['type'] = "video"
item['folderpath'] = xbmc.getInfoLabel('Player.Folderpath')
elif(player.isPlayingAudio()):
item['type'] = "audio"
else:
item['type'] = "plugin"
item['pluginpath'] = xbmc.getInfoLabel('Player.Filenameandpath')
return item
def getRecommendations():
| mediaInfo = getRunningmediaInfoInfo()
listItems = getRelatedItems(mediaInfo)
if(listItems):
passToSkin(listItems) | identifier_body |
|
plugin.py | else:
tempResult = result[count]
tempResult[key] = parseResult({key: ".".join(splitted[:i])}, tempEntry, False)
count += 1
else:
if(index.isdigit()):
temp = temp[int(index)]
else:
temp = temp[index]
return result
def executeJson(command):
xbmc.log("Command: " + command)
temp = json.loads(xbmc.executeJSONRPC(command))
if('result' in temp):
return temp['result']
return None
def passToSkin(listItems):
global handle
global params
xbmc.log('passToSkin called')
xbmc.log('handle ' + str(handle))
for item in listItems:
xbmc.log(str(item.getLabel()))
result = xbmcplugin.addDirectoryItems(handle=handle,
items=[(i.getProperty("path"), i, False) for i in listItems],
totalItems=len(listItems))
xbmc.log("adding dir was " + str(result))
xbmcplugin.endOfDirectory(handle)
#if('id' in params):
# windowId = 12901
# if('windowid' in params):
# windowId = int(Params['windowid'])
# xbmcgui.Window(windowId).getControl(int(params['id'])).setEnabled(True)
xbmcplugin.setResolvedUrl(handle=handle, succeeded=True, listitem=xbmcgui.ListItem())
return
def getDirectorFolder(director):
command = '{"jsonrpc": "2.0","method": "Files.GetDirectory","params": {"directory": "videodb://movies/directors/"}, "id": 1}'
jsonResult = executeJson(command)['files']
for entry in jsonResult:
if entry['label'] == director:
return entry['file']
def getSeasonForEpisode(mediaInfo):
command = '{"jsonrpc": "2.0","method": "Player.GetActivePlayers", "id": 1}'
#{"id":1,"jsonrpc":"2.0","result":[{"playerid":1,"playertype":"internal","type":"video"}]}
jsonResult = executeJson(command)
playerId = jsonResult[0]['playerid']
command = '{"jsonrpc": "2.0","method": "Player.GetItem","params": {"playerid": '+str(playerId)+', "properties":["tvshowid","season"]}, "id": 1}'
jsonResult = executeJson(command)['item']
xbmc.log(json.dumps(jsonResult))
global params
depth = params['depth']
xbmc.log("depth" + depth)
if(depth == "0"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'])+ '/'
if(depth == "1"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'] + 1)+ '/'
if(depth == "2"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'] - 1)+ '/'
def getSurroundingMovies(mediaInfo):
command = '{"jsonrpc": "2.0","method": "Player.GetActivePlayers", "id": 1}'
#{"id":1,"jsonrpc":"2.0","result":[{"playerid":1,"playertype":"internal","type":"video"}]}
jsonResult = executeJson(command)
playerId = jsonResult[0]['playerid']
command = '{"jsonrpc": "2.0","method": "Player.GetItem","params": {"playerid": '+str(playerId)+', "properties":["director","setid"]}, "id": 1}'
jsonResult = executeJson(command)['item']
if jsonResult['setid'] != 0:
return 'videodb://movies/sets/'+str(jsonResult['setid'])
return getDirectorFolder(jsonResult['director'][0])
def createListItemsFromDescriptor(listItemDescriptors):
result = []
for listItemDescriptor in listItemDescriptors:
art = {}
if("thumb" in listItemDescriptor):
art['thumb'] = listItemDescriptor['thumb']
elif("icon" in listItemDescriptor):
art['icon'] = listItemDescriptor['icon']
elif("poster" in listItemDescriptor):
art['poster'] = listItemDescriptor['poster']
result.append(createListItem(listItemDescriptors['label'], listItemDescriptors['path'], listItemDescriptors['thumb'], listItemDescriptors['label']))
return result
def doGet(url, headers):
req = Request(url, headers=headers)
response = urlopen(req)
result = response.read()
xbmc.log(result)
return json.loads(result)
def findProviderForPlugin(mediaInfo):
path = mediaInfo['pluginpath']
providerResult = {}
providerResult['type'] = None
providerResult['path'] = []
global providersConfig
global params
toSearch = providersConfig[params['depth']]
xbmc.log("finding plugin")
xbmc.log(str(providersConfig))
for key, values in toSearch.items():
xbmc.log(path)
xbmc.log(key)
xbmc.log(str(values))
match = re.search(key, path)
if(match):
xbmc.log("match")
result = []
for providers in values:
for provider in providers:
providerType = provider['type']
if(providerType == 'plugin'):
# is always last to be called
providerResult['type'] = "path"
path = eval(provider['path'])
providerResult['path'].append(path)
elif(providerType == 'getlistitm'):
# is always last to be called
providerResult['type'] = "items"
url = eval(provider['path'])
headers = {}
resultMapping = provider['result']
if 'headers' in provider:
for headerName, headerValue in provider['headers'].items():
headers[headerName] = eval(headerValue)
rResult = doGet(url, headers)
listItemDescriptors = parseResult(resultMapping, rResult, True)
providerResult['path'].append(createListItemsFromDescriptor(listItemDescriptors))
elif(providerType == 'get'):
url = eval(provider['path'])
headers = {}
resultMapping = provider['result']
if 'headers' in provider:
for headerName, headerValue in provider['headers'].items():
headers[headerName] = eval(headerValue)
xbmc.log("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
xbmc.log(url)
xbmc.log(str(headers))
rResult = doGet(url, headers)
xbmc.log(str(rResult))
result.append(parseResult(resultMapping, rResult, False))
return providerResult
xbmc.log("loop ended")
providerResult['type'] = "items"
providerResult['path'] = getLastDir()
xbmc.log("last dir used " + str(providerResult))
return providerResult
def getRelatedProvider(mediaInfo):
provider = {}
if (mediaInfo['type'] == "video"):
provider['type'] = "path"
provider['path'] = [mediaInfo['folderpath']]
elif (mediaInfo['type'] == "episode"):
provider['type'] = "path"
provider['path'] = [getSeasonForEpisode(mediaInfo)]
elif (mediaInfo['type'] == "movie"):
provider['type'] = "path"
provider['path'] = [getSurroundingMovies(mediaInfo)]
elif (mediaInfo['type'] == "plugin"):
provider = findProviderForPlugin(mediaInfo)
return provider
def createListItem(name, art, path, label, focused = False):
global count
if(focused):
xbmc.log(":::::::::::::::::::::::::::::::::")
li = xbmcgui.ListItem(name)
li.setArt(art)
li.setLabel(label)
li.setProperty("isPlayable", "false")
li.setProperty("index", str(count))
li.setPath(path=buildPath(art, path, label))
li.setProperty('path', path)
li.select(focused)
count += 1
return li
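# buildPath encodes the art dict, path and label into a plugin.program.relatedmedia
# callback URL so the skin can hand the selected item back to this addon.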
def buildPath(art, path, label):
return "plugin://plugin.program.relatedmedia?play=1&art=" + urllib.quote(json.dumps(art)) + "&path=" + urllib.quote(path) + "&label=" + urllib.quote(label.encode('utf8'))
def getInternalRelated(provider):
global params
length = -1
jsonResult = None
if('length' in params):
length = params['length']
for path in provider['path']:
command = '{"jsonrpc": "2.0","method": "Files.GetDirectory","params": {"directory": "'+path+'", "properties":["art"]'
#if(length != -1):
# command += ',"limits": {"end":"' + str(length)+'"}'
command += '}, "id": 1}'
jsonResult = executeJson(command)
if(jsonResult == None or not 'files' in jsonResult):
continue
break
if(jsonResult == None or not 'files' in jsonResult):
return None
jsonResult = jsonResult['files']
listItems = []
for entry in jsonResult:
listItems.append(createListItem(entry['label'], entry['art'], entry['file'], entry['label']))
return listItems
def getRelatedItems(mediaInfo):
provider = getRelatedProvider(mediaInfo)
if(provider['type
plugin.py
tempResult = result[count]
tempResult[key] = parseResult({key: ".".join(splitted[:i])}, tempEntry, False)
count += 1
else:
if(index.isdigit()):
temp = temp[int(index)]
else:
temp = temp[index]
return result
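# executeJson is a thin wrapper around xbmc.executeJSONRPC: it logs the command,
# parses the JSON reply and returns only the 'result' payload (or None if missing).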
def executeJson(command):
xbmc.log("Command: " + command)
temp = json.loads(xbmc.executeJSONRPC(command))
if('result' in temp):
return temp['result']
return None
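# passToSkin hands the collected ListItems back to Kodi: it adds them as directory
# items for the calling window, ends the directory listing and resolves the plugin URL.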
def passToSkin(listItems):
global handle
global params
xbmc.log('passToSkin called')
xbmc.log('handle ' + str(handle))
for item in listItems:
xbmc.log(str(item.getLabel()))
result = xbmcplugin.addDirectoryItems(handle=handle,
items=[(i.getProperty("path"), i, False) for i in listItems],
totalItems=len(listItems))
xbmc.log("adding dir was " + str(result))
xbmcplugin.endOfDirectory(handle)
#if('id' in params):
# windowId = 12901
# if('windowid' in params):
# windowId = int(Params['windowid'])
# xbmcgui.Window(windowId).getControl(int(params['id'])).setEnabled(True)
xbmcplugin.setResolvedUrl(handle=handle, succeeded=True, listitem=xbmcgui.ListItem())
return
def getDirectorFolder(director):
command = '{"jsonrpc": "2.0","method": "Files.GetDirectory","params": {"directory": "videodb://movies/directors/"}, "id": 1}'
jsonResult = executeJson(command)['files']
for entry in jsonResult:
if entry['label'] == director:
return entry['file']
def getSeasonForEpisode(mediaInfo):
command = '{"jsonrpc": "2.0","method": "Player.GetActivePlayers", "id": 1}'
#{"id":1,"jsonrpc":"2.0","result":[{"playerid":1,"playertype":"internal","type":"video"}]}
jsonResult = executeJson(command)
playerId = jsonResult[0]['playerid']
command = '{"jsonrpc": "2.0","method": "Player.GetItem","params": {"playerid": '+str(playerId)+', "properties":["tvshowid","season"]}, "id": 1}'
jsonResult = executeJson(command)['item']
xbmc.log(json.dumps(jsonResult))
global params
depth = params['depth']
xbmc.log("depth" + depth)
if(depth == "0"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'])+ '/'
if(depth == "1"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'] + 1)+ '/'
if(depth == "2"):
return 'videodb://tvshows/titles/' +str(jsonResult['tvshowid'])+ '/'+str(jsonResult['season'] - 1)+ '/'
def getSurroundingMovies(mediaInfo):
command = '{"jsonrpc": "2.0","method": "Player.GetActivePlayers", "id": 1}'
#{"id":1,"jsonrpc":"2.0","result":[{"playerid":1,"playertype":"internal","type":"video"}]}
jsonResult = executeJson(command)
playerId = jsonResult[0]['playerid']
command = '{"jsonrpc": "2.0","method": "Player.GetItem","params": {"playerid": '+str(playerId)+', "properties":["director","setid"]}, "id": 1}'
jsonResult = executeJson(command)['item']
if jsonResult['setid'] != 0:
return 'videodb://movies/sets/'+str(jsonResult['setid'])
return getDirectorFolder(jsonResult['director'][0])
def createListItemsFromDescriptor(listItemDescriptors):
result = []
for listItemDescriptor in listItemDescriptors:
art = {}
if("thumb" in listItemDescriptor):
art['thumb'] = listItemDescriptor['thumb']
elif("icon" in listItemDescriptor):
art['icon'] = listItemDescriptor['icon']
elif("poster" in listItemDescriptor):
art['poster'] = listItemDescriptor['poster']
result.append(createListItem(listItemDescriptor['label'], art, listItemDescriptor['path'], listItemDescriptor['label']))
return result
def | (url, headers):
req = Request(url, headers=headers)
response = urlopen(req)
result = response.read()
xbmc.log(result)
return json.loads(result)
def findProviderForPlugin(mediaInfo):
path = mediaInfo['pluginpath']
providerResult = {}
providerResult['type'] = None
providerResult['path'] = []
global providersConfig
global params
toSearch = providersConfig[params['depth']]
xbmc.log("finding plugin")
xbmc.log(str(providersConfig))
for key, values in toSearch.items():
xbmc.log(path)
xbmc.log(key)
xbmc.log(str(values))
match = re.search(key, path)
if(match):
xbmc.log("match")
result = []
for providers in values:
for provider in providers:
providerType = provider['type']
if(providerType == 'plugin'):
# is always last to be called
providerResult['type'] = "path"
path = eval(provider['path'])
providerResult['path'].append(path)
elif(providerType == 'getlistitm'):
# is always last to be called
providerResult['type'] = "items"
url = eval(provider['path'])
headers = {}
resultMapping = provider['result']
if 'headers' in provider:
for headerName, headerValue in provider['headers'].items():
headers[headerName] = eval(headerValue)
rResult = doGet(url, headers)
listItemDescriptors = parseResult(resultMapping, rResult, True)
providerResult['path'].append(createListItemsFromDescriptor(listItemDescriptors))
elif(providerType == 'get'):
url = eval(provider['path'])
headers = {}
resultMapping = provider['result']
if 'headers' in provider:
for headerName, headerValue in provider['headers'].items():
headers[headerName] = eval(headerValue)
xbmc.log("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
xbmc.log(url)
xbmc.log(str(headers))
rResult = doGet(url, headers)
xbmc.log(str(rResult))
result.append(parseResult(resultMapping, rResult, False))
return providerResult
xbmc.log("loop ended")
providerResult['type'] = "items"
providerResult['path'] = getLastDir()
xbmc.log("last dir used " + str(providerResult))
return providerResult
def getRelatedProvider(mediaInfo):
provider = {}
if (mediaInfo['type'] == "video"):
provider['type'] = "path"
provider['path'] = [mediaInfo['folderpath']]
elif (mediaInfo['type'] == "episode"):
provider['type'] = "path"
provider['path'] = [getSeasonForEpisode(mediaInfo)]
elif (mediaInfo['type'] == "movie"):
provider['type'] = "path"
provider['path'] = [getSurroundingMovies(mediaInfo)]
elif (mediaInfo['type'] == "plugin"):
provider = findProviderForPlugin(mediaInfo)
return provider
def createListItem(name, art, path, label, focused = False):
global count
if(focused):
xbmc.log(":::::::::::::::::::::::::::::::::")
li = xbmcgui.ListItem(name)
li.setArt(art)
li.setLabel(label)
li.setProperty("isPlayable", "false")
li.setProperty("index", str(count))
li.setPath(path=buildPath(art, path, label))
li.setProperty('path', path)
li.select(focused)
count += 1
return li
def buildPath(art, path, label):
return "plugin://plugin.program.relatedmedia?play=1&art=" + urllib.quote(json.dumps(art)) + "&path=" + urllib.quote(path) + "&label=" + urllib.quote(label.encode('utf8'))
def getInternalRelated(provider):
global params
length = -1
jsonResult = None
if('length' in params):
length = params['length']
for path in provider['path']:
command = '{"jsonrpc": "2.0","method": "Files.GetDirectory","params": {"directory": "'+path+'", "properties":["art"]'
#if(length != -1):
# command += ',"limits": {"end":"' + str(length)+'"}'
command += '}, "id": 1}'
jsonResult = executeJson(command)
if(jsonResult == None or not 'files' in jsonResult):
continue
break
if(jsonResult == None or not 'files' in jsonResult):
return None
jsonResult = jsonResult['files']
listItems = []
for entry in jsonResult:
listItems.append(createListItem(entry['label'], entry['art'], entry['file'], entry['label']))
return listItems
def getRelatedItems(mediaInfo):
provider = getRelatedProvider(mediaInfo)
if(provider['
storage.go
sourceStorageType := source.Type.StorageType()
targetStorageType := target.Type.StorageType()
if sourceStorageType != targetStorageType {
return errors.Newf(
"source vdisk %s and target vdisk %s have different storageTypes (%s != %s)",
source.VdiskID, target.VdiskID, sourceStorageType, targetStorageType)
}
var err error
switch sourceStorageType {
case config.StorageDeduped:
err = copyDedupedMetadata(
source.VdiskID, target.VdiskID, source.BlockSize, target.BlockSize,
sourceCluster, targetCluster)
case config.StorageNonDeduped:
err = copyNonDedupedData(
source.VdiskID, target.VdiskID, source.BlockSize, target.BlockSize,
sourceCluster, targetCluster)
case config.StorageSemiDeduped:
err = copySemiDeduped(
source.VdiskID, target.VdiskID, source.BlockSize, target.BlockSize,
sourceCluster, targetCluster)
default:
err = errors.Newf(
"%v is not a supported storage type", sourceStorageType)
}
if err != nil || !source.Type.TlogSupport() || !target.Type.TlogSupport() {
return err
}
return copyTlogMetadata(source.VdiskID, target.VdiskID, sourceCluster, targetCluster)
}
// DeleteVdisk returns true if the vdisk in question was deleted from the given ARDB storage cluster.
// An error is returned in case this couldn't be deleted (completely) for whatever reason.
//
// Note that for deduped storage the actual block data isn't deleted or dereferenced.
// See https://github.com/zero-os/0-Disk/issues/147
func DeleteVdisk(vdiskID string, configSource config.Source) (bool, error) {
staticConfig, err := config.ReadVdiskStaticConfig(configSource, vdiskID)
if err != nil {
return false, err
}
nbdConfig, err := config.ReadVdiskNBDConfig(configSource, vdiskID)
if err != nil {
return false, err
}
clusterConfig, err := config.ReadStorageClusterConfig(configSource, nbdConfig.StorageClusterID)
if err != nil {
return false, err
}
// if slave cluster is configured, we'll want to delete the vdisk from it as well
if nbdConfig.SlaveStorageClusterID != "" {
slaveClusterCfg, err := config.ReadStorageClusterConfig(configSource, nbdConfig.SlaveStorageClusterID)
if err != nil {
return false, err
}
clusterConfig.Servers = append(clusterConfig.Servers, slaveClusterCfg.Servers...)
}
// create a cluster of all primary (and slave) servers
cluster, err := ardb.NewCluster(*clusterConfig, nil)
if err != nil {
return false, err
}
// delete all data for this vdisk found in primary (and slave) servers
return DeleteVdiskInCluster(vdiskID, staticConfig.Type, cluster)
}
// DeleteVdiskInCluster returns true if the vdisk in question was deleted from the given ARDB storage cluster.
// An error is returned in case this couldn't be deleted (completely) for whatever reason.
//
// Note that for deduped storage the actual block data isn't deleted or dereferenced.
// See https://github.com/zero-os/0-Disk/issues/147
func DeleteVdiskInCluster(vdiskID string, t config.VdiskType, cluster ardb.StorageCluster) (bool, error) {
var err error
var deletedTlogMetadata bool
if t.TlogSupport() {
command := ardb.Command(command.Delete, tlogMetadataKey(vdiskID))
deletedTlogMetadata, err = ardb.Bool(cluster.Do(command))
if err != nil {
return false, err
}
if deletedTlogMetadata {
log.Infof("deleted tlog metadata stored for vdisk %s on first available server", vdiskID)
}
}
var deletedStorage bool
switch st := t.StorageType(); st {
case config.StorageDeduped:
deletedStorage, err = deleteDedupedData(vdiskID, cluster)
case config.StorageNonDeduped:
deletedStorage, err = deleteNonDedupedData(vdiskID, cluster)
case config.StorageSemiDeduped:
deletedStorage, err = deleteSemiDedupedData(vdiskID, cluster)
default:
err = errors.Newf("%v is not a supported storage type", st)
}
return deletedTlogMetadata || deletedStorage, err
}
// ListVdisks scans a given storage cluster
// for available vdisks, and returns their ids.
// Optionally a predicate can be given to
// filter specific vdisks based on their identifiers.
// NOTE: this function is very slow,
// and puts a lot of pressure on the ARDB cluster.
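// Usage sketch (the predicate may be nil to list every vdisk; strings.HasPrefix is
// only an example filter):
//
//	ids, err := ListVdisks(cluster, func(id string) bool {
//		return strings.HasPrefix(id, "test-")
//	})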
func ListVdisks(cluster ardb.StorageCluster, pred func(vdiskID string) bool) ([]string, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
serverCh, err := cluster.ServerIterator(ctx)
if err != nil {
return nil, err
}
type serverResult struct {
ids []string
err error
}
resultCh := make(chan serverResult)
var action listVdisksAction
if pred == nil {
action.filter = filterListedVdiskID
} else {
action.filter = func(str string) (string, bool) {
str, ok := filterListedVdiskID(str)
if !ok {
return "", false
}
return str, pred(str)
}
}
var serverCount int
var reply interface{}
for server := range serverCh {
server := server
go func() {
var result serverResult
log.Infof("listing all vdisks stored on %v", server.Config())
reply, result.err = server.Do(action)
if result.err == nil && reply != nil {
// [NOTE] this line of code relies on the fact that our
// custom `listVdisksAction` type returns a `[]string` value as a reply,
// as soon as that logic changes, this line will start causing trouble.
result.ids = reply.([]string)
}
select {
case resultCh <- result:
case <-ctx.Done():
}
}()
serverCount++
}
// collect the ids from all servers within the given cluster
var ids []string
var result serverResult
for i := 0; i < serverCount; i++ {
result = <-resultCh
if result.err != nil {
// return early, an error has occured!
return nil, result.err
}
ids = append(ids, result.ids...)
}
if len(ids) <= 1 {
return ids, nil // nothing to do
}
// sort and dedupe
sort.Strings(ids)
ids = dedupStrings(ids)
return ids, nil
}
type listVdisksAction struct {
filter func(string) (string, bool)
}
// Do implements StorageAction.Do
func (action listVdisksAction) Do(conn ardb.Conn) (reply interface{}, err error) {
const (
startCursor = "0"
itemCount = "5000"
)
var output, vdisks []string
var slice interface{}
// initial cursor and action
cursor := startCursor
scan := ardb.Command(command.Scan, cursor, "COUNT", itemCount)
// go through all available keys
for {
// get new cursor and raw data
cursor, slice, err = ardb.CursorAndValues(scan.Do(conn))
// convert the raw data to a string slice we can use
output, err = ardb.OptStrings(slice, err)
// return early in case of error
if err != nil {
return nil, err
}
// filter output
filterPos := 0
var ok bool
var vdiskID string
for i := range output {
vdiskID, ok = action.filter(output[i])
if ok {
output[filterPos] = vdiskID
filterPos++
}
}
output = output[:filterPos]
vdisks = append(vdisks, output...)
log.Debugf("%d/%s identifiers in iteration which match the given filters",
len(output), itemCount)
// stop in case we iterated through all possible values
if cursor == startCursor || cursor == "" {
break
}
scan = ardb.Command(command.Scan, cursor, "COUNT", itemCount)
}
return vdisks, nil
}
// Send implements StorageAction.Send
func (action listVdisksAction) Send(conn ardb.Conn) error {
return ErrMethodNotSupported
}
// KeysModified implements StorageAction.KeysModified
func (action listVdisksAction) KeysModified() ([]string, bool) {
return nil, false
}
// ListBlockIndices returns all indices stored for the given vdisk from a config source.
func ListBlockIndices(vdiskID string, source config.Source) ([]int64, error) {
staticConfig, err := config.ReadVdiskStaticConfig(source, vdiskID)
if err != nil {
return nil, err
}
|
storage.go
// Note that for deduped storage the actual block data isn't deleted or dereferenced.
// See https://github.com/zero-os/0-Disk/issues/147
func DeleteVdiskInCluster(vdiskID string, t config.VdiskType, cluster ardb.StorageCluster) (bool, error) {
var err error
var deletedTlogMetadata bool
if t.TlogSupport() {
command := ardb.Command(command.Delete, tlogMetadataKey(vdiskID))
deletedTlogMetadata, err = ardb.Bool(cluster.Do(command))
if err != nil {
return false, err
}
if deletedTlogMetadata {
log.Infof("deleted tlog metadata stored for vdisk %s on first available server", vdiskID)
}
}
var deletedStorage bool
switch st := t.StorageType(); st {
case config.StorageDeduped:
deletedStorage, err = deleteDedupedData(vdiskID, cluster)
case config.StorageNonDeduped:
deletedStorage, err = deleteNonDedupedData(vdiskID, cluster)
case config.StorageSemiDeduped:
deletedStorage, err = deleteSemiDedupedData(vdiskID, cluster)
default:
err = errors.Newf("%v is not a supported storage type", st)
}
return deletedTlogMetadata || deletedStorage, err
}
// ListVdisks scans a given storage cluster
// for available vdisks, and returns their ids.
// Optionally a predicate can be given to
// filter specific vdisks based on their identifiers.
// NOTE: this function is very slow,
// and puts a lot of pressure on the ARDB cluster.
func ListVdisks(cluster ardb.StorageCluster, pred func(vdiskID string) bool) ([]string, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
serverCh, err := cluster.ServerIterator(ctx)
if err != nil {
return nil, err
}
type serverResult struct {
ids []string
err error
}
resultCh := make(chan serverResult)
var action listVdisksAction
if pred == nil {
action.filter = filterListedVdiskID
} else {
action.filter = func(str string) (string, bool) {
str, ok := filterListedVdiskID(str)
if !ok {
return "", false
}
return str, pred(str)
}
}
var serverCount int
var reply interface{}
for server := range serverCh {
server := server
go func() {
var result serverResult
log.Infof("listing all vdisks stored on %v", server.Config())
reply, result.err = server.Do(action)
if result.err == nil && reply != nil {
// [NOTE] this line of code relies on the fact that our
// custom `listVdisksAction` type returns a `[]string` value as a reply,
// as soon as that logic changes, this line will start causing trouble.
result.ids = reply.([]string)
}
select {
case resultCh <- result:
case <-ctx.Done():
}
}()
serverCount++
}
// collect the ids from all servers within the given cluster
var ids []string
var result serverResult
for i := 0; i < serverCount; i++ {
result = <-resultCh
if result.err != nil {
// return early, an error has occured!
return nil, result.err
}
ids = append(ids, result.ids...)
}
if len(ids) <= 1 {
return ids, nil // nothing to do
}
// sort and dedupe
sort.Strings(ids)
ids = dedupStrings(ids)
return ids, nil
}
type listVdisksAction struct {
filter func(string) (string, bool)
}
// Do implements StorageAction.Do
func (action listVdisksAction) Do(conn ardb.Conn) (reply interface{}, err error) {
const (
startCursor = "0"
itemCount = "5000"
)
var output, vdisks []string
var slice interface{}
// initial cursor and action
cursor := startCursor
scan := ardb.Command(command.Scan, cursor, "COUNT", itemCount)
// go through all available keys
for {
// get new cursor and raw data
cursor, slice, err = ardb.CursorAndValues(scan.Do(conn))
// convert the raw data to a string slice we can use
output, err = ardb.OptStrings(slice, err)
// return early in case of error
if err != nil {
return nil, err
}
// filter output
filterPos := 0
var ok bool
var vdiskID string
for i := range output {
vdiskID, ok = action.filter(output[i])
if ok {
output[filterPos] = vdiskID
filterPos++
}
}
output = output[:filterPos]
vdisks = append(vdisks, output...)
log.Debugf("%d/%s identifiers in iteration which match the given filters",
len(output), itemCount)
// stop in case we iterated through all possible values
if cursor == startCursor || cursor == "" {
break
}
scan = ardb.Command(command.Scan, cursor, "COUNT", itemCount)
}
return vdisks, nil
}
// Send implements StorageAction.Send
func (action listVdisksAction) Send(conn ardb.Conn) error {
return ErrMethodNotSupported
}
// KeysModified implements StorageAction.KeysModified
func (action listVdisksAction) KeysModified() ([]string, bool) {
return nil, false
}
// ListBlockIndices returns all indices stored for the given vdisk from a config source.
func ListBlockIndices(vdiskID string, source config.Source) ([]int64, error) {
staticConfig, err := config.ReadVdiskStaticConfig(source, vdiskID)
if err != nil {
return nil, err
}
nbdConfig, err := config.ReadNBDStorageConfig(source, vdiskID)
if err != nil {
return nil, errors.Wrap(err, "failed to ReadNBDStorageConfig")
}
// create (primary) storage cluster
// TODO: support optional slave cluster here
// see: https://github.com/zero-os/0-Disk/issues/445
cluster, err := ardb.NewCluster(nbdConfig.StorageCluster, nil) // not pooled
if err != nil {
return nil, errors.Wrapf(err,
"cannot create storage cluster model for primary cluster of vdisk %s",
vdiskID)
}
return ListBlockIndicesInCluster(vdiskID, staticConfig.Type, cluster)
}
// ListBlockIndicesInCluster returns all indices stored for the given vdisk from cluster configs.
// This function returns either an error OR indices.
func ListBlockIndicesInCluster(id string, t config.VdiskType, cluster ardb.StorageCluster) ([]int64, error) {
switch st := t.StorageType(); st {
case config.StorageDeduped:
return listDedupedBlockIndices(id, cluster)
case config.StorageNonDeduped:
return listNonDedupedBlockIndices(id, cluster)
case config.StorageSemiDeduped:
return listSemiDedupedBlockIndices(id, cluster)
default:
return nil, errors.Newf("%v is not a supported storage type", st)
}
}
// filterListedVdiskID only accepts keys with a known prefix,
// if no known prefix is found an empty string is returned,
// otherwise the prefix is removed and the vdiskID is returned.
func filterListedVdiskID(key string) (string, bool) {
parts := listStorageKeyPrefixRex.FindStringSubmatch(key)
if len(parts) == 3 {
return parts[2], true
}
return "", false
}
var listStorageKeyPrefixRex = regexp.MustCompile("^(" +
strings.Join(listStorageKeyPrefixes, "|") +
")(.+)$")
var listStorageKeyPrefixes = []string{
lbaStorageKeyPrefix,
nonDedupedStorageKeyPrefix,
}
// sortInt64s sorts a slice of int64s
func sortInt64s(s []int64) {
if len(s) < 2 {
return
}
sort.Sort(int64Slice(s))
}
// int64Slice implements the sort.Interface for a slice of int64s
type int64Slice []int64
func (s int64Slice) Len() int { return len(s) }
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
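// Note: on Go 1.8+ the same ordering could also be expressed as
// sort.Slice(s, func(i, j int) bool { return s[i] < s[j] }); the explicit
// sort.Interface type is kept here as-is.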
// dedupInt64s deduplicates a given int64 slice which is already sorted.
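// It walks the sorted slice backwards, compacting the last occurrence of each distinct
// value towards the tail and returning that unique suffix,
// e.g. []int64{1, 1, 2, 3, 3} -> []int64{1, 2, 3}.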
func dedupInt64s(s []int64) []int64 {
p := len(s) - 1
if p <= 0 {
return s
}
for i := p - 1; i >= 0; i-- {
if s[p] != s[i] {
p--
s[p] = s[i]
}
}
return s[p:]
}
|
storage.go | storage
return NewBlockStorage(cfg, cluster, templateCluster)
}
// NewBlockStorage returns the correct block storage based on the given VdiskConfig.
func NewBlockStorage(cfg BlockStorageConfig, cluster, templateCluster ardb.StorageCluster) (storage BlockStorage, err error) {
err = cfg.Validate()
if err != nil {
return
}
vdiskType := cfg.VdiskType
// templateCluster gets disabled,
// if vdisk type has no template support.
if !vdiskType.TemplateSupport() {
templateCluster = nil
}
switch storageType := vdiskType.StorageType(); storageType {
case config.StorageDeduped:
return Deduped(
cfg.VdiskID,
cfg.BlockSize,
cfg.LBACacheLimit,
cluster,
templateCluster)
case config.StorageNonDeduped:
return NonDeduped(
cfg.VdiskID,
cfg.TemplateVdiskID,
cfg.BlockSize,
cluster,
templateCluster)
case config.StorageSemiDeduped:
return SemiDeduped(
cfg.VdiskID,
cfg.BlockSize,
cfg.LBACacheLimit,
cluster,
templateCluster)
default:
return nil, errors.Newf(
"no block storage available for %s's storage type %s",
cfg.VdiskID, storageType)
}
}
// VdiskExists returns true if the vdisk in question exists in the given ARDB storage cluster.
// An error is returned in case this couldn't be verified for whatever reason.
// Also return vdiskType and ardb cluster from config
func VdiskExists(vdiskID string, source config.Source) (bool, error) {
// gather configs
staticConfig, err := config.ReadVdiskStaticConfig(source, vdiskID)
if err != nil {
return false, errors.Wrapf(err,
"cannot read static vdisk config for vdisk %s", vdiskID)
}
nbdConfig, err := config.ReadVdiskNBDConfig(source, vdiskID)
if err != nil {
return false, errors.Wrapf(err,
"cannot read nbd storage config for vdisk %s", vdiskID)
}
clusterConfig, err := config.ReadStorageClusterConfig(source, nbdConfig.StorageClusterID)
if err != nil {
return false, errors.Wrapf(err,
"cannot read storage cluster config for cluster %s",
nbdConfig.StorageClusterID)
}
// create (primary) storage cluster
cluster, err := ardb.NewCluster(*clusterConfig, nil) // not pooled
if err != nil {
return false, errors.Wrapf(err,
"cannot create storage cluster model for cluster %s",
nbdConfig.StorageClusterID)
}
return VdiskExistsInCluster(vdiskID, staticConfig.Type, cluster)
}
// VdiskExistsInCluster returns true if the vdisk in question exists in the given ARDB storage cluster.
// An error is returned in case this couldn't be verified for whatever reason.
func | (vdiskID string, t config.VdiskType, cluster ardb.StorageCluster) (bool, error) {
switch st := t.StorageType(); st {
case config.StorageDeduped:
return dedupedVdiskExists(vdiskID, cluster)
case config.StorageNonDeduped:
return nonDedupedVdiskExists(vdiskID, cluster)
case config.StorageSemiDeduped:
return semiDedupedVdiskExists(vdiskID, cluster)
default:
return false, errors.Newf("%v is not a supported storage type", st)
}
}
// CopyVdiskConfig is the config for a vdisk
// used when calling the CopyVdisk primitive.
type CopyVdiskConfig struct {
VdiskID string
Type config.VdiskType
BlockSize int64
}
// CopyVdisk allows you to copy a vdisk from a source to a target vdisk.
// The source and target vdisks have to have the same storage type and block size.
// They can be stored on the same or different clusters.
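// Example (sketch; the vdisk IDs, block size and clusters are placeholders):
//
//	src := CopyVdiskConfig{VdiskID: "vd-src", Type: vdiskType, BlockSize: 4096}
//	dst := CopyVdiskConfig{VdiskID: "vd-dst", Type: vdiskType, BlockSize: 4096}
//	err := CopyVdisk(src, dst, sourceCluster, targetCluster)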
func CopyVdisk(source, target CopyVdiskConfig, sourceCluster, targetCluster ardb.StorageCluster) error {
sourceStorageType := source.Type.StorageType()
targetStorageType := target.Type.StorageType()
if sourceStorageType != targetStorageType {
return errors.Newf(
"source vdisk %s and target vdisk %s have different storageTypes (%s != %s)",
source.VdiskID, target.VdiskID, sourceStorageType, targetStorageType)
}
var err error
switch sourceStorageType {
case config.StorageDeduped:
err = copyDedupedMetadata(
source.VdiskID, target.VdiskID, source.BlockSize, target.BlockSize,
sourceCluster, targetCluster)
case config.StorageNonDeduped:
err = copyNonDedupedData(
source.VdiskID, target.VdiskID, source.BlockSize, target.BlockSize,
sourceCluster, targetCluster)
case config.StorageSemiDeduped:
err = copySemiDeduped(
source.VdiskID, target.VdiskID, source.BlockSize, target.BlockSize,
sourceCluster, targetCluster)
default:
err = errors.Newf(
"%v is not a supported storage type", sourceStorageType)
}
if err != nil || !source.Type.TlogSupport() || !target.Type.TlogSupport() {
return err
}
return copyTlogMetadata(source.VdiskID, target.VdiskID, sourceCluster, targetCluster)
}
// DeleteVdisk returns true if the vdisk in question was deleted from the given ARDB storage cluster.
// An error is returned in case this couldn't be deleted (completely) for whatever reason.
//
// Note that for deduped storage the actual block data isn't deleted or dereferenced.
// See https://github.com/zero-os/0-Disk/issues/147
func DeleteVdisk(vdiskID string, configSource config.Source) (bool, error) {
staticConfig, err := config.ReadVdiskStaticConfig(configSource, vdiskID)
if err != nil {
return false, err
}
nbdConfig, err := config.ReadVdiskNBDConfig(configSource, vdiskID)
if err != nil {
return false, err
}
clusterConfig, err := config.ReadStorageClusterConfig(configSource, nbdConfig.StorageClusterID)
if err != nil {
return false, err
}
// if slave cluster is configured, we'll want to delete the vdisk from it as well
if nbdConfig.SlaveStorageClusterID != "" {
slaveClusterCfg, err := config.ReadStorageClusterConfig(configSource, nbdConfig.SlaveStorageClusterID)
if err != nil {
return false, err
}
clusterConfig.Servers = append(clusterConfig.Servers, slaveClusterCfg.Servers...)
}
// create a cluster of all primary (and slave) servers
cluster, err := ardb.NewCluster(*clusterConfig, nil)
if err != nil {
return false, err
}
// delete all data for this vdisk found in primary (and slave) servers
return DeleteVdiskInCluster(vdiskID, staticConfig.Type, cluster)
}
// DeleteVdiskInCluster returns true if the vdisk in question was deleted from the given ARDB storage cluster.
// An error is returned in case this couldn't be deleted (completely) for whatever reason.
//
// Note that for deduped storage the actual block data isn't deleted or dereferenced.
// See https://github.com/zero-os/0-Disk/issues/147
func DeleteVdiskInCluster(vdiskID string, t config.VdiskType, cluster ardb.StorageCluster) (bool, error) {
var err error
var deletedTlogMetadata bool
if t.TlogSupport() {
command := ardb.Command(command.Delete, tlogMetadataKey(vdiskID))
deletedTlogMetadata, err = ardb.Bool(cluster.Do(command))
if err != nil {
return false, err
}
if deletedTlogMetadata {
log.Infof("deleted tlog metadata stored for vdisk %s on first available server", vdiskID)
}
}
var deletedStorage bool
switch st := t.StorageType(); st {
case config.StorageDeduped:
deletedStorage, err = deleteDedupedData(vdiskID, cluster)
case config.StorageNonDeduped:
deletedStorage, err = deleteNonDedupedData(vdiskID, cluster)
case config.StorageSemiDeduped:
deletedStorage, err = deleteSemiDedupedData(vdiskID, cluster)
default:
err = errors.Newf("%v is not a supported storage type", st)
}
return deletedTlogMetadata || deletedStorage, err
}
// ListVdisks scans a given storage cluster
// for available vdisks, and returns their ids.
// Optionally a predicate can be given to
// filter specific vdisks based on their identifiers.
// NOTE: this function is very slow,
// and puts a lot of pressure on the ARDB cluster.
func ListVdisks(cluster ardb.StorageCluster, pred func(vdiskID string) bool) ([]string, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
serverCh, err := cluster.ServerIterator | VdiskExistsInCluster | identifier_name |
storage.go | storage
return NewBlockStorage(cfg, cluster, templateCluster)
}
// NewBlockStorage returns the correct block storage based on the given VdiskConfig.
func NewBlockStorage(cfg BlockStorageConfig, cluster, templateCluster ardb.StorageCluster) (storage BlockStorage, err error) {
err = cfg.Validate()
if err != nil {
return
}
vdiskType := cfg.VdiskType
// templateCluster gets disabled,
// if vdisk type has no template support.
if !vdiskType.TemplateSupport() {
templateCluster = nil
}
switch storageType := vdiskType.StorageType(); storageType {
case config.StorageDeduped:
return Deduped(
cfg.VdiskID,
cfg.BlockSize,
cfg.LBACacheLimit,
cluster,
templateCluster)
case config.StorageNonDeduped:
return NonDeduped(
cfg.VdiskID,
cfg.TemplateVdiskID,
cfg.BlockSize,
cluster,
templateCluster)
case config.StorageSemiDeduped:
return SemiDeduped(
cfg.VdiskID,
cfg.BlockSize,
cfg.LBACacheLimit,
cluster,
templateCluster)
default:
return nil, errors.Newf(
"no block storage available for %s's storage type %s",
cfg.VdiskID, storageType)
}
}
// VdiskExists returns true if the vdisk in question exists in the given ARDB storage cluster.
// An error is returned in case this couldn't be verified for whatever reason.
// Also return vdiskType and ardb cluster from config
func VdiskExists(vdiskID string, source config.Source) (bool, error) {
// gather configs
staticConfig, err := config.ReadVdiskStaticConfig(source, vdiskID)
if err != nil {
return false, errors.Wrapf(err,
"cannot read static vdisk config for vdisk %s", vdiskID)
}
nbdConfig, err := config.ReadVdiskNBDConfig(source, vdiskID)
if err != nil {
return false, errors.Wrapf(err,
"cannot read nbd storage config for vdisk %s", vdiskID)
}
clusterConfig, err := config.ReadStorageClusterConfig(source, nbdConfig.StorageClusterID)
if err != nil {
return false, errors.Wrapf(err,
"cannot read storage cluster config for cluster %s",
nbdConfig.StorageClusterID)
}
// create (primary) storage cluster
cluster, err := ardb.NewCluster(*clusterConfig, nil) // not pooled
if err != nil {
return false, errors.Wrapf(err,
"cannot create storage cluster model for cluster %s",
nbdConfig.StorageClusterID)
}
return VdiskExistsInCluster(vdiskID, staticConfig.Type, cluster)
}
// VdiskExistsInCluster returns true if the vdisk in question exists in the given ARDB storage cluster.
// An error is returned in case this couldn't be verified for whatever reason. | func VdiskExistsInCluster(vdiskID string, t config.VdiskType, cluster ardb.StorageCluster) (bool, error) {
switch st := t.StorageType(); st {
case config.StorageDeduped:
return dedupedVdiskExists(vdiskID, cluster)
case config.StorageNonDeduped:
return nonDedupedVdiskExists(vdiskID, cluster)
case config.StorageSemiDeduped:
return semiDedupedVdiskExists(vdiskID, cluster)
default:
return false, errors.Newf("%v is not a supported storage type", st)
}
}
// CopyVdiskConfig is the config for a vdisk
// used when calling the CopyVdisk primitive.
type CopyVdiskConfig struct {
VdiskID string
Type config.VdiskType
BlockSize int64
}
// CopyVdisk allows you to copy a vdisk from a source to a target vdisk.
// The source and target vdisks have to have the same storage type and block size.
// They can be stored on the same or different clusters.
func CopyVdisk(source, target CopyVdiskConfig, sourceCluster, targetCluster ardb.StorageCluster) error {
sourceStorageType := source.Type.StorageType()
targetStorageType := target.Type.StorageType()
if sourceStorageType != targetStorageType {
return errors.Newf(
"source vdisk %s and target vdisk %s have different storageTypes (%s != %s)",
source.VdiskID, target.VdiskID, sourceStorageType, targetStorageType)
}
var err error
switch sourceStorageType {
case config.StorageDeduped:
err = copyDedupedMetadata(
source.VdiskID, target.VdiskID, source.BlockSize, target.BlockSize,
sourceCluster, targetCluster)
case config.StorageNonDeduped:
err = copyNonDedupedData(
source.VdiskID, target.VdiskID, source.BlockSize, target.BlockSize,
sourceCluster, targetCluster)
case config.StorageSemiDeduped:
err = copySemiDeduped(
source.VdiskID, target.VdiskID, source.BlockSize, target.BlockSize,
sourceCluster, targetCluster)
default:
err = errors.Newf(
"%v is not a supported storage type", sourceStorageType)
}
if err != nil || !source.Type.TlogSupport() || !target.Type.TlogSupport() {
return err
}
return copyTlogMetadata(source.VdiskID, target.VdiskID, sourceCluster, targetCluster)
}
// DeleteVdisk returns true if the vdisk in question was deleted from the given ARDB storage cluster.
// An error is returned in case this couldn't be deleted (completely) for whatever reason.
//
// Note that for deduped storage the actual block data isn't deleted or dereferenced.
// See https://github.com/zero-os/0-Disk/issues/147
func DeleteVdisk(vdiskID string, configSource config.Source) (bool, error) {
staticConfig, err := config.ReadVdiskStaticConfig(configSource, vdiskID)
if err != nil {
return false, err
}
nbdConfig, err := config.ReadVdiskNBDConfig(configSource, vdiskID)
if err != nil {
return false, err
}
clusterConfig, err := config.ReadStorageClusterConfig(configSource, nbdConfig.StorageClusterID)
if err != nil {
return false, err
}
// if slave cluster is configured, we'll want to delete the vdisk from it as well
if nbdConfig.SlaveStorageClusterID != "" {
slaveClusterCfg, err := config.ReadStorageClusterConfig(configSource, nbdConfig.SlaveStorageClusterID)
if err != nil {
return false, err
}
clusterConfig.Servers = append(clusterConfig.Servers, slaveClusterCfg.Servers...)
}
// create a cluster of all primary (and slave) servers
cluster, err := ardb.NewCluster(*clusterConfig, nil)
if err != nil {
return false, err
}
// delete all data for this vdisk found in primary (and slave) servers
return DeleteVdiskInCluster(vdiskID, staticConfig.Type, cluster)
}
// DeleteVdiskInCluster returns true if the vdisk in question was deleted from the given ARDB storage cluster.
// An error is returned in case this couldn't be deleted (completely) for whatever reason.
//
// Note that for deduped storage the actual block data isn't deleted or dereferenced.
// See https://github.com/zero-os/0-Disk/issues/147
func DeleteVdiskInCluster(vdiskID string, t config.VdiskType, cluster ardb.StorageCluster) (bool, error) {
var err error
var deletedTlogMetadata bool
if t.TlogSupport() {
command := ardb.Command(command.Delete, tlogMetadataKey(vdiskID))
deletedTlogMetadata, err = ardb.Bool(cluster.Do(command))
if err != nil {
return false, err
}
if deletedTlogMetadata {
log.Infof("deleted tlog metadata stored for vdisk %s on first available server", vdiskID)
}
}
var deletedStorage bool
switch st := t.StorageType(); st {
case config.StorageDeduped:
deletedStorage, err = deleteDedupedData(vdiskID, cluster)
case config.StorageNonDeduped:
deletedStorage, err = deleteNonDedupedData(vdiskID, cluster)
case config.StorageSemiDeduped:
deletedStorage, err = deleteSemiDedupedData(vdiskID, cluster)
default:
err = errors.Newf("%v is not a supported storage type", st)
}
return deletedTlogMetadata || deletedStorage, err
}
// ListVdisks scans a given storage cluster
// for available vdisks, and returns their ids.
// Optionally a predicate can be given to
// filter specific vdisks based on their identifiers.
// NOTE: this function is very slow,
// and puts a lot of pressure on the ARDB cluster.
func ListVdisks(cluster ardb.StorageCluster, pred func(vdiskID string) bool) ([]string, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
serverCh, err := cluster.ServerIterator(ctx | random_line_split |
|
FinalProject.py
for product, details in self.finalBill.items():
sum += details[1] * details[0]
print(product + " ,quantity: " + str(details[1]) + " ,price: " + str(details[1] * details[0]) + "$")
print("Total cost: " + str(sum)+"$")
class Movie:
def __init__(self, code, movieName, category, ScreeningDate, price):
self.code = code
self.movieName = movieName
self.category = category
self.ScreeningDate = ScreeningDate
self.price = price
class Snack:
def __init__(self,snackName, snackPrice):
self.snackName = snackName
self.snackPrice = snackPrice
class Buffet:
def __init__(self):
self.avilableProducts = [Snack("Coca cola",15),
Snack("Water",5),
Snack("Popcorn",20),
Snack("Nachos",15),
Snack("Chocolate",10),
Snack("Bamba",10)]
def printBuffet(self):
j = 1
print("********************")
print("Buffet Menu:")
for i in self.avilableProducts :
print(str(j) + ". " + i.snackName +", Price: " + str(i.snackPrice) + "$")
j +=1
class Cinema:
def __init__(self):
self.avilableMovies = [Movie(536, "Fast & Furious", "Action", "24/06/2021", 30),
Movie(245, "Lion King", "Disney", "24/06/2021",20),
Movie(875, "Aladdin", "Disney", "25/07/2021", 20),
Movie(444, "Taken", "Action", "30/06/2021", 30),
Movie(333, "Neighbors", "Comedy", "26/06/2021", 25),
Movie(555, "We're the Millers", "Comedy", "01/07/2021", 25),
Movie(809, "Beauty and the Beast", "Disney", "29/06/2021", 20),
Movie(666, "American Sniper", "Action", "25/06/2021", 30),
Movie(213, "Django Unchained", "Action", "01/07/2021", 30),
Movie(617, "The Little Mermaid", "Disney", "03/07/2021", 20),
Movie(321, "The Hangover", "Comedy", "05/07/2021", 25),
Movie(893, "American Pie", "Comedy", "10/07/2021", 25),
Movie(445, "Mulan", "Disney", "07/07/2021", 20)]
def printMovies(self):
print("Avilable movies in our Cinema:")
j = 1
for i in self.avilableMovies :
print(str(j) + ". Name: " + i.movieName + ", Category: " + i.category + ", Date: " + i.ScreeningDate + " , Price: " + str(i.price) + "$")
j +=1
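# menu() below wires everything together: a Tkinter login window, the ticket/snack
# purchase flow, a SQL Server Customers table (via pyodbc) and an Excel export
# (via openpyxl). It assumes the tkinter/pyodbc/openpyxl imports and the Customer
# class are defined earlier in the file.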
def menu():
def ExcNoneQuery(sql_command, values):
server= 'LAPTOP-K8MAK0VU\SQLEXPRESS'
database = 'Movies'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server}; \
SERVER=' + server +'; \
DATABASE='+ database +'; \
Trusted_connection=yes;')
crsr = cnxn.cursor()
crsr.execute(sql_command, values)
crsr.commit()
crsr.close()
cnxn.close()
def ExcQuery(sql_command):
server= 'LAPTOP-K8MAK0VU\SQLEXPRESS'
database = 'Movies'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server}; \
SERVER=' + server +'; \
DATABASE='+ database +'; \
Trusted_connection=yes;')
crsr = cnxn.cursor()
crsr.execute(sql_command)
for row in crsr:
print(row)
crsr.close()
cnxn.close()
window = Tk()
window.title("Nitzan & Eliran")
bg1 = PhotoImage(file=r'''C:\Users\Nitzan Gabay\OneDrive - Ruppin Academic Center\Desktop\שנה ב'\סמסטר ב\פייטון\מבחן מסכם בפייטון\Popcorn.png''')
bg1_lable = Label(window, image= bg1)
bg1_lable.place(x=0, y=0, relwidth=1, relheight=1)
lableName = Label(window, text="Name:", font=("Sitka Small", 30), bg="black", fg="white" )
lableName.grid(column=8, row=1)
userName = Entry(window,font=("Sitka Small", 20),width=5)
userName.grid(column=8, row=2)
lableBudget = Label(window, text="Budget:", font=("Sitka Small", 30), bg="black", fg="white")
lableBudget.grid(column=8, row=3)
userBudget = Entry(window, font=("Sitka Small", 20),width=5)
userBudget.grid(column=8, row=4)
lable = Label(window, text="", font=("David bold", 10))
lable.grid(column=0, row=5)
lable = Label(window, text="", font=("David bold", 10))
lable.grid(column=0, row=6)
def clickToStart():
messagebox.showinfo("Setup", "Hello " + userName.get() + "!\nWelcome to our Cinema!")
global name1
global budget1
name1 = userName.get()
budget2 = userBudget.get()
budget1 = int(budget2)
window.destroy()
bt = Button(window, text = "Submit",font=("Sitka Small", 15), bg='black', fg='white', width=10, command = clickToStart)
bt.grid(column=5, row=8)
window.geometry('480x330')
window.mainloop()
customers = Customer(name1, budget1)
cinema = Cinema()
buffet = Buffet()
def clickBtn1():
cinema.printMovies()
def clickBtn2():
customers.BuyTicket(cinema)
def clickBtn3():
buffet.printBuffet()
def clickBtn4():
customers.BuySnack(buffet)
def clickBtn5():
print("*****************\n")
print("Your final bill: ")
customers.ShowFinalBill()
def clickBtn6():
sql_command = 'Insert into Customers(Name,Budget) VALUES(?,?)'
values = (name1,budget1)
ExcNoneQuery(sql_command, values)
print("The customer " + name1 + " saved to DB")
def clickBtn7():
sql_command = 'Select * from Customers Order By Id'
ExcQuery(sql_command)
def clickBtn8():
wb = openpyxl.load_workbook(filename = './Movies.xlsx')
sheet = wb['Movies']
i = 1
for item in cinema.avilableMovies:
sheet.cell(row = i, column = 1).value = str(item.movieName)
sheet.cell(row = i, column = 2).value = str(item.category)
sheet.cell(row = i, column = 3).value = str(item.ScreeningDate)
i += 1
wb.save('Movies.xlsx')
print(str(i-1) + " Movies added to Excel file.")
def clickBtn9():
print("By |
newMenu.destroy()
newMenu = Tk()
newMenu.title("Nitzan & Eliran")
bg = PhotoImage(file=r'''C:\Users\Nitzan Gabay\OneDrive - Ruppin Academic Center\Desktop\שנה ב'\סמסטר ב\פייטון\מבחן מסכם בפייטון\Cinema1.png''')
bg_lable = Label(newMenu, image= bg)
bg_lable.place(x=0, y=0, relwidth=1, relheight=1)
headline = Label(newMenu, text = " Ruppin Cinema ", font=("Castellar", 49), fg="White", bg="black")
headline.pack(pady=40)
my_frame = Frame(newMenu)
my_frame.pack(pady=0)
bt1 = Button(my_frame, text = "See Avilable Movies",font=("Sitka Small", 16), bg='black', fg | e Bye..") | identifier_name |
FinalProject.py |
self.finalBill[product] = (productPrice, productAmount)
def ShowFinalBill(self):
sum = 0
for product, details in self.finalBill.items():
sum += details[1] * details[0]
print(product + " ,quantity: " + str(details[1]) + " ,price: " + str(details[1] * details[0]) + "$")
print("Total cost: " + str(sum)+"$")
class Movie:
def __init__(self, code, movieName, category, ScreeningDate, price):
self.code = code
self.movieName = movieName
self.category = category
self.ScreeningDate = ScreeningDate
self.price = price
class Snack:
def __init__(self,snackName, snackPrice):
self.snackName = snackName
self.snackPrice = snackPrice
class Buffet:
def __init__(self):
self.avilableProducts = [Snack("Coca cola",15),
Snack("Water",5),
Snack("Popcorn",20),
Snack("Nachos",15),
Snack("Chocolate",10),
Snack("Bamba",10)]
def printBuffet(self):
j = 1
print("********************")
print("Buffet Menu:")
for i in self.avilableProducts :
print(str(j) + ". " + i.snackName +", Price: " + str(i.snackPrice) + "$")
j +=1
class Cinema:
def __init__(self):
self.avilableMovies = [Movie(536, "Fast & Furious", "Action", "24/06/2021", 30),
Movie(245, "Lion King", "Disney", "24/06/2021",20),
Movie(875, "Aladdin", "Disney", "25/07/2021", 20),
Movie(444, "Taken", "Action", "30/06/2021", 30),
Movie(333, "Neighbors", "Comedy", "26/06/2021", 25),
Movie(555, "We're the Millers", "Comedy", "01/07/2021", 25),
Movie(809, "Beauty and the Beast", "Disney", "29/06/2021", 20),
Movie(666, "American Sniper", "Action", "25/06/2021", 30),
Movie(213, "Django Unchained", "Action", "01/07/2021", 30),
Movie(617, "The Little Mermaid", "Disney", "03/07/2021", 20),
Movie(321, "The Hangover", "Comedy", "05/07/2021", 25),
Movie(893, "American Pie", "Comedy", "10/07/2021", 25),
Movie(445, "Mulan", "Disney", "07/07/2021", 20)]
def printMovies(self):
print("Avilable movies in our Cinema:")
j = 1
for i in self.avilableMovies :
print(str(j) + ". Name: " + i.movieName + ", Category: " + i.category + ", Date: " + i.ScreeningDate + " , Price: " + str(i.price) + "$")
j +=1
def menu():
def ExcNoneQuery(sql_command, values):
server= 'LAPTOP-K8MAK0VU\SQLEXPRESS'
database = 'Movies'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server}; \
SERVER=' + server +'; \
DATABASE='+ database +'; \
Trusted_connection=yes;')
crsr = cnxn.cursor()
crsr.execute(sql_command, values)
crsr.commit()
crsr.close()
cnxn.close()
def ExcQuery(sql_command):
server= 'LAPTOP-K8MAK0VU\SQLEXPRESS'
database = 'Movies'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server}; \
SERVER=' + server +'; \
DATABASE='+ database +'; \
Trusted_connection=yes;')
crsr = cnxn.cursor()
crsr.execute(sql_command)
for row in crsr:
print(row)
crsr.close()
cnxn.close()
window = Tk()
window.title("Nitzan & Eliran")
bg1 = PhotoImage(file=r'''C:\Users\Nitzan Gabay\OneDrive - Ruppin Academic Center\Desktop\שנה ב'\סמסטר ב\פייטון\מבחן מסכם בפייטון\Popcorn.png''')
bg1_lable = Label(window, image= bg1)
bg1_lable.place(x=0, y=0, relwidth=1, relheight=1)
lableName = Label(window, text="Name:", font=("Sitka Small", 30), bg="black", fg="white" )
lableName.grid(column=8, row=1)
userName = Entry(window,font=("Sitka Small", 20),width=5)
userName.grid(column=8, row=2)
lableBudget = Label(window, text="Budget:", font=("Sitka Small", 30), bg="black", fg="white")
lableBudget.grid(column=8, row=3)
userBudget = Entry(window, font=("Sitka Small", 20),width=5)
userBudget.grid(column=8, row=4)
lable = Label(window, text="", font=("David bold", 10))
lable.grid(column=0, row=5)
lable = Label(window, text="", font=("David bold", 10))
lable.grid(column=0, row=6)
def clickToStart():
messagebox.showinfo("Setup", "Hello " + userName.get() + "!\nWelcome to our Cinema!")
global name1
global budget1
name1 = userName.get()
budget2 = userBudget.get()
budget1 = int(budget2)
window.destroy()
bt = Button(window, text = "Submit",font=("Sitka Small", 15), bg='black', fg='white', width=10, command = clickToStart)
bt.grid(column=5, row=8)
window.geometry('480x330')
window.mainloop()
customers = Customer(name1, budget1)
cinema = Cinema()
buffet = Buffet()
def clickBtn1():
cinema.printMovies()
def clickBtn2():
customers.BuyTicket(cinema)
def clickBtn3():
buffet.printBuffet()
def clickBtn4():
customers.BuySnack(buffet)
def clickBtn5():
print("*****************\n")
print("Your final bill: ")
customers.ShowFinalBill()
def clickBtn6():
sql_command = 'Insert into Customers(Name,Budget) VALUES(?,?)'
values = (name1,budget1)
ExcNoneQuery(sql_command, values)
print("The customer " + name1 + " saved to DB")
def clickBtn7():
sql_command = 'Select * from Customers Order By Id'
ExcQuery(sql_command)
def clickBtn8():
wb = openpyxl.load_workbook(filename = './Movies.xlsx')
sheet = wb['Movies']
i = 1
for item in cinema.avilableMovies:
sheet.cell(row = i, column = 1).value = str(item.movieName)
sheet.cell(row = i, column = 2).value = str(item.category)
sheet.cell(row = i, column = 3).value = str(item.ScreeningDate)
i += 1
wb.save('Movies.xlsx')
print(str(i-1) + " Movies added to Excel file.")
def clickBtn9():
print("Bye Bye..")
newMenu.destroy()
newMenu = Tk()
newMenu.title("Nitzan & Eliran")
bg = PhotoImage(file=r'''C:\Users\Nitzan Gabay\OneDrive - Ruppin Academic Center\Desktop\שנה ב'\סמסטר ב\פייטון\מבחן מסכם בפייטון\Cinema1.png''')
bg_lable = Label(newMenu, image= bg)
bg_lable.place(x=0, y=0, relwidth=1, relheight=1)
headline = Label(newMenu, text = " Ruppin Cinema ", font=("Castellar", 49), | if product in i:
self.finalBill[product] = (productPrice, self.finalBill[product][1] + productAmount)
return | conditional_block |
|
FinalProject.py | Bill.items():
sum += details[1] * details[0]
print(product + " ,quantity: " + str(details[1]) + " ,price: " + str(details[1] * details[0]) + "$")
print("Total cost: " + str(sum)+"$")
class Movie:
def __init__(self, code, movieName, category, ScreeningDate, price):
self.code = code
self.movieName = movieName
self.category = category
self.ScreeningDate = ScreeningDate
self.price = price
class Snack:
def __init__(self,snackName, snackPrice):
self.snackName = snackName
self.snackPrice = snackPrice
class Buffet:
def __init__(self):
self.avilableProducts = [Snack("Coca cola",15),
Snack("Water",5),
Snack("Popcorn",20),
Snack("Nachos",15),
Snack("Chocolate",10),
Snack("Bamba",10)]
def printBuffet(self):
j = 1
print("********************")
print("Buffet Menu:")
for i in self.avilableProducts :
print(str(j) + ". " + i.snackName +", Price: " + str(i.snackPrice) + "$")
j +=1
class Cinema:
| j +=1
def menu():
def ExcNoneQuery(sql_command, values):
server= 'LAPTOP-K8MAK0VU\SQLEXPRESS'
database = 'Movies'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server}; \
SERVER=' + server +'; \
DATABASE='+ database +'; \
Trusted_connection=yes;')
crsr = cnxn.cursor()
crsr.execute(sql_command, values)
crsr.commit()
crsr.close()
cnxn.close()
def ExcQuery(sql_command):
server= 'LAPTOP-K8MAK0VU\SQLEXPRESS'
database = 'Movies'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server}; \
SERVER=' + server +'; \
DATABASE='+ database +'; \
Trusted_connection=yes;')
crsr = cnxn.cursor()
crsr.execute(sql_command)
for row in crsr:
print(row)
crsr.close()
cnxn.close()
window = Tk()
window.title("Nitzan & Eliran")
bg1 = PhotoImage(file=r'''C:\Users\Nitzan Gabay\OneDrive - Ruppin Academic Center\Desktop\שנה ב'\סמסטר ב\פייטון\מבחן מסכם בפייטון\Popcorn.png''')
bg1_lable = Label(window, image= bg1)
bg1_lable.place(x=0, y=0, relwidth=1, relheight=1)
lableName = Label(window, text="Name:", font=("Sitka Small", 30), bg="black", fg="white" )
lableName.grid(column=8, row=1)
userName = Entry(window,font=("Sitka Small", 20),width=5)
userName.grid(column=8, row=2)
lableBudget = Label(window, text="Budget:", font=("Sitka Small", 30), bg="black", fg="white")
lableBudget.grid(column=8, row=3)
userBudget = Entry(window, font=("Sitka Small", 20),width=5)
userBudget.grid(column=8, row=4)
lable = Label(window, text="", font=("David bold", 10))
lable.grid(column=0, row=5)
lable = Label(window, text="", font=("David bold", 10))
lable.grid(column=0, row=6)
def clickToStart():
messagebox.showinfo("Setup", "Hello " + userName.get() + "!\nWelcome to our Cinema!")
global name1
global budget1
name1 = userName.get()
budget2 = userBudget.get()
budget1 = int(budget2)
window.destroy()
bt = Button(window, text = "Submit",font=("Sitka Small", 15), bg='black', fg='white', width=10, command = clickToStart)
bt.grid(column=5, row=8)
window.geometry('480x330')
window.mainloop()
customers = Customer(name1, budget1)
cinema = Cinema()
buffet = Buffet()
def clickBtn1():
cinema.printMovies()
def clickBtn2():
customers.BuyTicket(cinema)
def clickBtn3():
buffet.printBuffet()
def clickBtn4():
customers.BuySnack(buffet)
def clickBtn5():
print("*****************\n")
print("Your final bill: ")
customers.ShowFinalBill()
def clickBtn6():
sql_command = 'Insert into Customers(Name,Budget) VALUES(?,?)'
values = (name1,budget1)
ExcNoneQuery(sql_command, values)
print("The customer " + name1 + " saved to DB")
def clickBtn7():
sql_command = 'Select * from Customers Order By Id'
ExcQuery(sql_command)
def clickBtn8():
wb = openpyxl.load_workbook(filename = './Movies.xlsx')
sheet = wb['Movies']
i = 1
for item in cinema.avilableMovies:
sheet.cell(row = i, column = 1).value = str(item.movieName)
sheet.cell(row = i, column = 2).value = str(item.category)
sheet.cell(row = i, column = 3).value = str(item.ScreeningDate)
i += 1
wb.save('Movies.xlsx')
print(str(i-1) + " Movies added to Excel file.")
def clickBtn9():
print("Bye Bye..")
newMenu.destroy()
newMenu = Tk()
newMenu.title("Nitzan & Eliran")
bg = PhotoImage(file=r'''C:\Users\Nitzan Gabay\OneDrive - Ruppin Academic Center\Desktop\שנה ב'\סמסטר ב\פייטון\מבחן מסכם בפייטון\Cinema1.png''')
bg_lable = Label(newMenu, image= bg)
bg_lable.place(x=0, y=0, relwidth=1, relheight=1)
headline = Label(newMenu, text = " Ruppin Cinema ", font=("Castellar", 49), fg="White", bg="black")
headline.pack(pady=40)
my_frame = Frame(newMenu)
my_frame.pack(pady=0)
bt1 = Button(my_frame, text = "See Avilable Movies",font=("Sitka Small", 16), bg='black', fg | def __init__(self):
self.avilableMovies = [Movie(536, "Fast & Furious", "Action", "24/06/2021", 30),
Movie(245, "Lion King", "Disney", "24/06/2021",20),
Movie(875, "Aladdin", "Disney", "25/07/2021", 20),
Movie(444, "Taken", "Action", "30/06/2021", 30),
Movie(333, "Neighbors", "Comedy", "26/06/2021", 25),
Movie(555, "We're the Millers", "Comedy", "01/07/2021", 25),
Movie(809, "Beauty and the Beast", "Disney", "29/06/2021", 20),
Movie(666, "American Sniper", "Action", "25/06/2021", 30),
Movie(213, "Django Unchained", "Action", "01/07/2021", 30),
Movie(617, "The Little Mermaid", "Disney", "03/07/2021", 20),
Movie(321, "The Hangover", "Comedy", "05/07/2021", 25),
Movie(893, "American Pie", "Comedy", "10/07/2021", 25),
Movie(445, "Mulan", "Disney", "07/07/2021", 20)]
def printMovies(self):
print("Avilable movies in our Cinema:")
j = 1
for i in self.avilableMovies :
print(str(j) + ". Name: " + i.movieName + ", Category: " + i.category + ", Date: " + i.ScreeningDate + " , Price: " + str(i.price) + "$")
| identifier_body |
FinalProject.py
for product, details in self.finalBill.items():
sum += details[1] * details[0]
print(product + " ,quantity: " + str(details[1]) + " ,price: " + str(details[1] * details[0]) + "$")
print("Total cost: " + str(sum)+"$")
class Movie:
def __init__(self, code, movieName, category, ScreeningDate, price):
self.code = code
self.movieName = movieName
self.category = category
self.ScreeningDate = ScreeningDate
self.price = price
class Snack:
def __init__(self,snackName, snackPrice):
self.snackName = snackName
self.snackPrice = snackPrice
class Buffet:
def __init__(self):
self.avilableProducts = [Snack("Coca cola",15),
Snack("Water",5),
Snack("Popcorn",20),
Snack("Nachos",15),
Snack("Chocolate",10),
Snack("Bamba",10)]
def printBuffet(self):
j = 1
print("********************")
print("Buffet Menu:")
for i in self.avilableProducts :
print(str(j) + ". " + i.snackName +", Price: " + str(i.snackPrice) + "$")
j +=1
class Cinema:
def __init__(self):
self.avilableMovies = [Movie(536, "Fast & Furious", "Action", "24/06/2021", 30),
Movie(245, "Lion King", "Disney", "24/06/2021",20),
Movie(875, "Aladdin", "Disney", "25/07/2021", 20),
Movie(444, "Taken", "Action", "30/06/2021", 30),
Movie(333, "Neighbors", "Comedy", "26/06/2021", 25),
Movie(555, "We're the Millers", "Comedy", "01/07/2021", 25),
Movie(809, "Beauty and the Beast", "Disney", "29/06/2021", 20),
Movie(666, "American Sniper", "Action", "25/06/2021", 30),
Movie(213, "Django Unchained", "Action", "01/07/2021", 30),
|
def printMovies(self):
print("Avilable movies in our Cinema:")
j = 1
for i in self.avilableMovies :
print(str(j) + ". Name: " + i.movieName + ", Category: " + i.category + ", Date: " + i.ScreeningDate + " , Price: " + str(i.price) + "$")
j +=1
def menu():
def ExcNoneQuery(sql_command, values):
server= 'LAPTOP-K8MAK0VU\SQLEXPRESS'
database = 'Movies'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server}; \
SERVER=' + server +'; \
DATABASE='+ database +'; \
Trusted_connection=yes;')
crsr = cnxn.cursor()
crsr.execute(sql_command, values)
crsr.commit()
crsr.close()
cnxn.close()
def ExcQuery(sql_command):
server= 'LAPTOP-K8MAK0VU\SQLEXPRESS'
database = 'Movies'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server}; \
SERVER=' + server +'; \
DATABASE='+ database +'; \
Trusted_connection=yes;')
crsr = cnxn.cursor()
crsr.execute(sql_command)
for row in crsr:
print(row)
crsr.close()
cnxn.close()
window = Tk()
window.title("Nitzan & Eliran")
bg1 = PhotoImage(file=r'''C:\Users\Nitzan Gabay\OneDrive - Ruppin Academic Center\Desktop\שנה ב'\סמסטר ב\פייטון\מבחן מסכם בפייטון\Popcorn.png''')
bg1_lable = Label(window, image= bg1)
bg1_lable.place(x=0, y=0, relwidth=1, relheight=1)
lableName = Label(window, text="Name:", font=("Sitka Small", 30), bg="black", fg="white" )
lableName.grid(column=8, row=1)
userName = Entry(window,font=("Sitka Small", 20),width=5)
userName.grid(column=8, row=2)
lableBudget = Label(window, text="Budget:", font=("Sitka Small", 30), bg="black", fg="white")
lableBudget.grid(column=8, row=3)
userBudget = Entry(window, font=("Sitka Small", 20),width=5)
userBudget.grid(column=8, row=4)
lable = Label(window, text="", font=("David bold", 10))
lable.grid(column=0, row=5)
lable = Label(window, text="", font=("David bold", 10))
lable.grid(column=0, row=6)
def clickToStart():
messagebox.showinfo("Setup", "Hello " + userName.get() + "!\nWelcome to our Cinema!")
global name1
global budget1
name1 = userName.get()
budget2 = userBudget.get()
budget1 = int(budget2)
window.destroy()
bt = Button(window, text = "Submit",font=("Sitka Small", 15), bg='black', fg='white', width=10, command = clickToStart)
bt.grid(column=5, row=8)
window.geometry('480x330')
window.mainloop()
customers = Customer(name1, budget1)
cinema = Cinema()
buffet = Buffet()
def clickBtn1():
cinema.printMovies()
def clickBtn2():
customers.BuyTicket(cinema)
def clickBtn3():
buffet.printBuffet()
def clickBtn4():
customers.BuySnack(buffet)
def clickBtn5():
print("*****************\n")
print("Your final bill: ")
customers.ShowFinalBill()
def clickBtn6():
sql_command = 'Insert into Customers(Name,Budget) VALUES(?,?)'
values = (name1,budget1)
ExcNoneQuery(sql_command, values)
print("The customer " + name1 + " saved to DB")
def clickBtn7():
sql_command = 'Select * from Customers Order By Id'
ExcQuery(sql_command)
def clickBtn8():
wb = openpyxl.load_workbook(filename = './Movies.xlsx')
sheet = wb['Movies']
i = 1
for item in cinema.avilableMovies:
sheet.cell(row = i, column = 1).value = str(item.movieName)
sheet.cell(row = i, column = 2).value = str(item.category)
sheet.cell(row = i, column = 3).value = str(item.ScreeningDate)
i += 1
wb.save('Movies.xlsx')
print(str(i-1) + " Movies added to Excel file.")
def clickBtn9():
print("Bye Bye..")
newMenu.destroy()
newMenu = Tk()
newMenu.title("Nitzan & Eliran")
bg = PhotoImage(file=r'''C:\Users\Nitzan Gabay\OneDrive - Ruppin Academic Center\Desktop\שנה ב'\סמסטר ב\פייטון\מבחן מסכם בפייטון\Cinema1.png''')
bg_lable = Label(newMenu, image= bg)
bg_lable.place(x=0, y=0, relwidth=1, relheight=1)
headline = Label(newMenu, text = " Ruppin Cinema ", font=("Castellar", 49), fg="White", bg="black")
headline.pack(pady=40)
my_frame = Frame(newMenu)
my_frame.pack(pady=0)
bt1 = Button(my_frame, text = "See Avilable Movies",font=("Sitka Small", 16), bg='black', fg | Movie(617, "The Little Mermaid", "Disney", "03/07/2021", 20),
Movie(321, "The Hangover", "Comedy", "05/07/2021", 25),
Movie(893, "American Pie", "Comedy", "10/07/2021", 25),
Movie(445, "Mulan", "Disney", "07/07/2021", 20)]
| random_line_split |
micro_mlperftiny.py | to import a TFLite model from MLPerfTiny benchmark models,
compile it with TVM and generate a Zephyr project which can be flashed to a Zephyr
supported board to benchmark the model using EEMBC runner.
"""
######################################################################
#
# .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst
#
import os
import pathlib
import tarfile
import tempfile
import shutil
######################################################################
#
# .. include:: ../../../../gallery/how_to/work_with_microtvm/install_zephyr.rst
#
######################################################################
#
# **Note:** Install CMSIS-NN only if you are interested to generate this submission
# using CMSIS-NN code generator.
#
######################################################################
#
# .. include:: ../../../../gallery/how_to/work_with_microtvm/install_cmsis.rst
#
######################################################################
# Import Python dependencies
# -------------------------------
#
import tensorflow as tf
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend import Executor, Runtime
from tvm.contrib.download import download_testdata
from tvm.micro import export_model_library_format
import tvm.micro.testing
from tvm.micro.testing.utils import (
create_header_file,
mlf_extract_workspace_size_bytes,
)
######################################################################
# Import Visual Wake Word Model
# --------------------------------------------------------------------
#
# To begin with, download and import the Visual Wake Word (VWW) TFLite model from MLPerfTiny.
# This model is originally from `MLPerf Tiny repository <https://github.com/mlcommons/tiny>`_.
# We also capture metadata information from the TFLite model such as input/output name,
# quantization parameters, etc. which will be used in following steps.
#
# We use indexing for various models to build the submission. The indices are defined as follows:
# To build another model, you need to update the model URL, the short name and index number.
#
# * Keyword Spotting(KWS) 1
# * Visual Wake Word(VWW) 2
# * Anomaly Detection(AD) 3
# * Image Classification(IC) 4
#
# If you would like to build the submission with CMSIS-NN, modify USE_CMSIS environment variable.
#
# .. code-block:: bash
#
# export USE_CMSIS=1
#
MODEL_URL = "https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite"
MODEL_PATH = download_testdata(MODEL_URL, "vww_96_int8.tflite", module="model")
MODEL_SHORT_NAME = "VWW"
MODEL_INDEX = 2
USE_CMSIS = os.environ.get("TVM_USE_CMSIS", False)
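# Illustrative aside (not part of the original tutorial): switching the submission to another
# MLPerfTiny benchmark only means changing the three values above, e.g. for Keyword Spotting
# one would set MODEL_SHORT_NAME = "KWS" and MODEL_INDEX = 1, with MODEL_URL pointing at the
# corresponding trained model in the MLPerf Tiny repository (that URL is not reproduced here).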
tflite_model_buf = open(MODEL_PATH, "rb").read()
try:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
interpreter = tf.lite.Interpreter(model_path=str(MODEL_PATH))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_name = input_details[0]["name"]
input_shape = tuple(input_details[0]["shape"])
input_dtype = np.dtype(input_details[0]["dtype"]).name
output_name = output_details[0]["name"]
output_shape = tuple(output_details[0]["shape"])
output_dtype = np.dtype(output_details[0]["dtype"]).name
# We extract quantization information from TFLite model.
# This is required for all models except Anomaly Detection,
# because for other models we send quantized data to interpreter
# from host, however, for AD model we send floating data and quantization
# happens on the microcontroller.
if MODEL_SHORT_NAME != "AD":
quant_output_scale = output_details[0]["quantization_parameters"]["scales"][0]
quant_output_zero_point = output_details[0]["quantization_parameters"]["zero_points"][0]
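# Illustrative note (assumption, not part of the original tutorial): the two values captured
# above are the standard TFLite affine-quantization parameters, so an int8 output value q maps
# back to a real number roughly as  real = scale * (q - zero_point). A hypothetical host-side
# helper would look like:
#
#   def dequantize_output(q):
#       return quant_output_scale * (q.astype("float32") - quant_output_zero_point)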
relay_mod, params = relay.frontend.from_tflite(
tflite_model, shape_dict={input_name: input_shape}, dtype_dict={input_name: input_dtype}
)
######################################################################
# Defining Target, Runtime and Executor
# --------------------------------------------------------------------
#
# Now we need to define the target, runtime and executor to compile this model. In this tutorial,
# we use Ahead-of-Time (AoT) compilation and we build a standalone project. This is different
# than using AoT with host-driven mode where the target would communicate with host using host-driven
# AoT executor to run inference.
#
# Use the C runtime (crt)
RUNTIME = Runtime("crt")
# Use the AoT executor with `unpacked-api=True` and `interface-api=c`. `interface-api=c` forces
# the compiler to generate C type function APIs and `unpacked-api=True` forces the compiler
# to generate minimal unpacked format inputs which reduces the stack memory usage on calling
# inference layers of the model.
EXECUTOR = Executor(
"aot",
{"unpacked-api": True, "interface-api": "c", "workspace-byte-alignment": 8},
)
# Select a Zephyr board
BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi")
# Get the full target description using the BOARD
TARGET = tvm.micro.testing.get_target("zephyr", BOARD)
######################################################################
# Compile the model and export model library format
# --------------------------------------------------------------------
#
# Now, we compile the model for the target. Then, we generate model
# library format for the compiled model. We also need to calculate the
# workspace size that is required for the compiled model.
#
#
config = {"tir.disable_vectorize": True}
if USE_CMSIS:
from tvm.relay.op.contrib import cmsisnn
config["relay.ext.cmsisnn.options"] = {"mcpu": TARGET.mcpu}
relay_mod = cmsisnn.partition_for_cmsisnn(relay_mod, params, mcpu=TARGET.mcpu)
with tvm.transform.PassContext(opt_level=3, config=config):
module = tvm.relay.build(
relay_mod, target=TARGET, params=params, runtime=RUNTIME, executor=EXECUTOR
)
temp_dir = tvm.contrib.utils.tempdir()
model_tar_path = temp_dir / "model.tar"
export_model_library_format(module, model_tar_path)
workspace_size = mlf_extract_workspace_size_bytes(model_tar_path)
######################################################################
# Generate input/output header files
# --------------------------------------------------------------------
#
# To create a microTVM standalone project with AoT, we need to generate
# input and output header files. These header files are used to connect
# the input and output API from generated code to the rest of the
# standalone project. For this specific submission, we only need to generate
# output header file since the input API call is handled differently.
#
extra_tar_dir = tvm.contrib.utils.tempdir()
extra_tar_file = extra_tar_dir / "extra.tar"
with tarfile.open(extra_tar_file, "w:gz") as tf:
create_header_file(
"output_data",
np.zeros(
shape=output_shape,
dtype=output_dtype,
),
"include/tvm",
tf,
)
######################################################################
# Create the project, build and prepare the project tar file
# --------------------------------------------------------------------
#
# Now that we have the compiled model as a model library format,
# we can generate the full project using Zephyr template project. First,
# we prepare the project options, then build the project. Finally, we
# cleanup the temporary files and move the submission project to the
# current working directory which could be downloaded and used on
# your development kit.
#
input_total_size = 1
for i in range(len(input_shape)):
input_total_size *= input_shape[i]
template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr"))
project_options = {
"extra_files_tar": str(extra_tar_file),
"project_type": "mlperftiny",
"board": BOARD,
"compile_definitions": [
f"-DWORKSPACE_SIZE={workspace_size + 512}", # Memory workspace size, 512 is a temporary offset
# since the memory calculation is not accurate.
f"-DTARGET_MODEL={MODEL_INDEX}", # Sets the model index for project compilation.
f"-DTH_MODEL_VERSION=EE_MODEL_VERSION_{MODEL_SHORT_NAME}01", # Sets model version. This is required by MLPerfTiny API.
f"-DMAX_DB_INPUT_SIZE={input_total_size}", # Max size of the input data array.
],
}
if MODEL_SHORT_NAME != "AD":
project_options["compile_definitions"].append(f"-DOUT_QUANT_SCALE={quant_output_scale}")
project_options["compile_definitions"].append(f"-DOUT_QUANT_ZERO={quant_output_zero_point}")
if USE_CMSIS:
project_options["compile_definitions"].append(f"-DCOMPILE_WITH_CMSISNN=1")
# Note: You might need to adjust this based on the board that you are using.
project_options["config_main_stack_size"] = 4000
if USE_CMSIS:
| project_options["cmsis_path"] = os.environ.get("CMSIS_PATH", "/content/cmsis") | conditional_block |
|
micro_mlperftiny.py | download_testdata
from tvm.micro import export_model_library_format
import tvm.micro.testing
from tvm.micro.testing.utils import (
create_header_file,
mlf_extract_workspace_size_bytes,
)
######################################################################
# Import Visual Wake Word Model
# --------------------------------------------------------------------
#
# To begin with, download and import the Visual Wake Word (VWW) TFLite model from MLPerfTiny.
# This model is originally from `MLPerf Tiny repository <https://github.com/mlcommons/tiny>`_.
# We also capture metadata information from the TFLite model such as input/output name,
# quantization parameters, etc. which will be used in following steps.
#
# We use indexing for various models to build the submission. The indices are defined as follows:
# To build another model, you need to update the model URL, the short name and index number.
#
# * Keyword Spotting(KWS) 1
# * Visual Wake Word(VWW) 2
# * Anomaly Detection(AD) 3
# * Image Classification(IC) 4
#
# If you would like to build the submission with CMSIS-NN, modify USE_CMSIS environment variable.
#
# .. code-block:: bash
#
# export USE_CMSIS=1
#
MODEL_URL = "https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite"
MODEL_PATH = download_testdata(MODEL_URL, "vww_96_int8.tflite", module="model")
MODEL_SHORT_NAME = "VWW"
MODEL_INDEX = 2
USE_CMSIS = os.environ.get("TVM_USE_CMSIS", False)
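# Illustrative aside (not part of the original tutorial): switching the submission to another
# MLPerfTiny benchmark only means changing the three values above, e.g. for Keyword Spotting
# one would set MODEL_SHORT_NAME = "KWS" and MODEL_INDEX = 1, with MODEL_URL pointing at the
# corresponding trained model in the MLPerf Tiny repository (that URL is not reproduced here).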
tflite_model_buf = open(MODEL_PATH, "rb").read()
try:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
interpreter = tf.lite.Interpreter(model_path=str(MODEL_PATH))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_name = input_details[0]["name"]
input_shape = tuple(input_details[0]["shape"])
input_dtype = np.dtype(input_details[0]["dtype"]).name
output_name = output_details[0]["name"]
output_shape = tuple(output_details[0]["shape"])
output_dtype = np.dtype(output_details[0]["dtype"]).name
# We extract quantization information from TFLite model.
# This is required for all models except Anomaly Detection,
# because for other models we send quantized data to interpreter
# from host, however, for AD model we send floating data and quantization
# happens on the microcontroller.
if MODEL_SHORT_NAME != "AD":
quant_output_scale = output_details[0]["quantization_parameters"]["scales"][0]
quant_output_zero_point = output_details[0]["quantization_parameters"]["zero_points"][0]
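# Illustrative note (assumption, not part of the original tutorial): the two values captured
# above are the standard TFLite affine-quantization parameters, so an int8 output value q maps
# back to a real number roughly as  real = scale * (q - zero_point). A hypothetical host-side
# helper would look like:
#
#   def dequantize_output(q):
#       return quant_output_scale * (q.astype("float32") - quant_output_zero_point)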
relay_mod, params = relay.frontend.from_tflite(
tflite_model, shape_dict={input_name: input_shape}, dtype_dict={input_name: input_dtype}
)
######################################################################
# Defining Target, Runtime and Executor
# --------------------------------------------------------------------
#
# Now we need to define the target, runtime and executor to compile this model. In this tutorial,
# we use Ahead-of-Time (AoT) compilation and we build a standalone project. This is different
# than using AoT with host-driven mode where the target would communicate with host using host-driven
# AoT executor to run inference.
#
# Use the C runtime (crt)
RUNTIME = Runtime("crt")
# Use the AoT executor with `unpacked-api=True` and `interface-api=c`. `interface-api=c` forces
# the compiler to generate C type function APIs and `unpacked-api=True` forces the compiler
# to generate minimal unpacked format inputs which reduces the stack memory usage on calling
# inference layers of the model.
EXECUTOR = Executor(
"aot",
{"unpacked-api": True, "interface-api": "c", "workspace-byte-alignment": 8},
)
# Select a Zephyr board
BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi")
# Get the full target description using the BOARD
TARGET = tvm.micro.testing.get_target("zephyr", BOARD)
######################################################################
# Compile the model and export model library format
# --------------------------------------------------------------------
#
# Now, we compile the model for the target. Then, we generate model
# library format for the compiled model. We also need to calculate the
# workspace size that is required for the compiled model.
#
#
config = {"tir.disable_vectorize": True}
if USE_CMSIS:
from tvm.relay.op.contrib import cmsisnn
config["relay.ext.cmsisnn.options"] = {"mcpu": TARGET.mcpu}
relay_mod = cmsisnn.partition_for_cmsisnn(relay_mod, params, mcpu=TARGET.mcpu)
with tvm.transform.PassContext(opt_level=3, config=config):
module = tvm.relay.build(
relay_mod, target=TARGET, params=params, runtime=RUNTIME, executor=EXECUTOR
)
temp_dir = tvm.contrib.utils.tempdir()
model_tar_path = temp_dir / "model.tar"
export_model_library_format(module, model_tar_path)
workspace_size = mlf_extract_workspace_size_bytes(model_tar_path)
######################################################################
# Generate input/output header files
# --------------------------------------------------------------------
#
# To create a microTVM standalone project with AoT, we need to generate
# input and output header files. These header files are used to connect
# the input and output API from generated code to the rest of the
# standalone project. For this specific submission, we only need to generate
# output header file since the input API call is handled differently.
#
extra_tar_dir = tvm.contrib.utils.tempdir()
extra_tar_file = extra_tar_dir / "extra.tar"
with tarfile.open(extra_tar_file, "w:gz") as tf:
create_header_file(
"output_data",
np.zeros(
shape=output_shape,
dtype=output_dtype,
),
"include/tvm",
tf,
)
######################################################################
# Create the project, build and prepare the project tar file
# --------------------------------------------------------------------
#
# Now that we have the compiled model as a model library format,
# we can generate the full project using Zephyr template project. First,
# we prepare the project options, then build the project. Finally, we
# cleanup the temporary files and move the submission project to the
# current working directory which could be downloaded and used on
# your development kit.
#
input_total_size = 1
for i in range(len(input_shape)):
input_total_size *= input_shape[i]
template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr"))
project_options = {
"extra_files_tar": str(extra_tar_file),
"project_type": "mlperftiny",
"board": BOARD,
"compile_definitions": [
f"-DWORKSPACE_SIZE={workspace_size + 512}", # Memory workspace size, 512 is a temporary offset
# since the memory calculation is not accurate.
f"-DTARGET_MODEL={MODEL_INDEX}", # Sets the model index for project compilation.
f"-DTH_MODEL_VERSION=EE_MODEL_VERSION_{MODEL_SHORT_NAME}01", # Sets model version. This is required by MLPerfTiny API.
f"-DMAX_DB_INPUT_SIZE={input_total_size}", # Max size of the input data array.
],
}
if MODEL_SHORT_NAME != "AD":
project_options["compile_definitions"].append(f"-DOUT_QUANT_SCALE={quant_output_scale}")
project_options["compile_definitions"].append(f"-DOUT_QUANT_ZERO={quant_output_zero_point}")
if USE_CMSIS:
project_options["compile_definitions"].append(f"-DCOMPILE_WITH_CMSISNN=1")
# Note: You might need to adjust this based on the board that you are using.
project_options["config_main_stack_size"] = 4000
if USE_CMSIS:
project_options["cmsis_path"] = os.environ.get("CMSIS_PATH", "/content/cmsis")
generated_project_dir = temp_dir / "project"
project = tvm.micro.project.generate_project_from_mlf(
template_project_path, generated_project_dir, model_tar_path, project_options
)
project.build()
# Cleanup the build directory and extra artifacts
shutil.rmtree(generated_project_dir / "build")
(generated_project_dir / "model.tar").unlink()
project_tar_path = pathlib.Path(os.getcwd()) / "project.tar"
with tarfile.open(project_tar_path, "w:tar") as tar:
tar.add(generated_project_dir, arcname=os.path.basename("project"))
print(f"The generated project is located here: {project_tar_path}")
######################################################################
# Use this project with your board
# --------------------------------------------------------------------
#
# Now that we have the generated project, you can use this project locally
# to flash your board and prepare it for EEMBC runner software.
# To do this follow these steps:
#
# .. code-block:: bash
#
# tar -xf project.tar
# cd project
# mkdir build | # cmake ..
# make -j2 | random_line_split |
|
client.go | for _, value := range strings.Split(strings.TrimSpace(received_data), common.MESSAGE_DELIMITER) {
// // fmt.Println("parse received:", value)
// // if len(value) > 0 && isValidString(value) {
// // // Check if the end of the message is "end." Otherwise this is a partial message and you must wait for the rest
// // if value[len(value)-1:] == common.MESSAGE_ENDER {
// // output <- message + value
// // message = ""
// // } else {
// // message = message + value
// // }
// // }
// // }
// }
// }
// input := make(chan string, 1000)
// tunnel := make(chan string, 1000)
// go handleMessage(input, tunnel, signal)
// go parseMessage(input, result, signal)
for {
p := make([]byte, 55)
_, err := c.Read(p)
if err == nil {
signal <-true
// input <- string(p)
}
// temp := (strings.Split(string( p ), "*"))[1]
// fmt.Println("Temp:", temp)
// latency_time, _ := strconv.ParseFloat( strings.Split(temp, "|")[0], 64)
}
}
func client_thread(client_id string, zone string, num_t int, txns []string, summation_ch chan Latencies, start_signal <-chan bool, done_ch chan<- bool) {
var addresses []Address
file2, _ := ioutil.ReadFile("testing/primaries.json")
_ = json.Unmarshal([]byte(file2), &addresses)
// Make a map to use either your zone primary or primary 0
directory := make(map[string]net.Conn)
// signal := make(chan bool)
for j := 0; j < len(addresses); j++ {
// if addresses[j].Zone == zone {
// conn2, err := net.Dial("tcp", addresses[j].Ip + ":" + addresses[j].Port)
// if err != nil {
// fmt.Println(err)
// return
// }
// directory["local"] = conn2
// p := make([]byte, 1024)
// _, err = conn2.Read(p)
// // fmt.Println("Received:", string(p))
// go handleConnection(conn2, summation_ch, signal)
// } else {
conn2, err := net.Dial("tcp", addresses[j].Ip + ":" + addresses[j].Port)
if err != nil {
fmt.Println(err)
return
}
directory[ addresses[j].Zone ] = conn2
p := make([]byte, 1024)
_, err = conn2.Read(p)
// fmt.Println("Received:", string(p))
// go handleConnection(conn2, summation_ch, signal)
// }
}
// Read the start signal
<-start_signal
// for all_start {}
// fmt.Println("Got signal, starting now")
if num_t != len(txns) {
fmt.Println("num_t and txns not the same", num_t, txns)
}
// client_starttime := time.Now()
previous_zone := "-1"
p := make([]byte, 1024)
for i := 0; i < num_t; i++ {
// p := make([]byte, 512)
i_str := strconv.Itoa(i)
txn_type := txns[i][0:1]
client_request := common.MESSAGE_DELIMITER + "CLIENT_REQUEST|" + client_id + "!" + i_str + "!10"
// start := time.Now()
// fmt.Println("Starting :" + client_id + "!" + i_str + "!10")
// start := time.Now())
if txn_type == "l" {
client_request += "!l|" + common.MESSAGE_ENDER + common.MESSAGE_DELIMITER
// directory["local"].Write([]byte(client_request))
directory[zone].Write([]byte(client_request))
// fmt.Fprintf(directory["local"], client_request)
// _, err = bufio.NewReader(directory["local"]).Read(p)
// if err == nil {
// // fmt.Printf("%s\n", p)
// } else {
// fmt.Printf("Some error %v\n", err)
// }
// fmt.Println("Received", string(p), "l")
previous_zone = zone
} else {
global_zone := txn_type
if previous_zone != global_zone {
// A leader election is needed
client_request += "!G"
} else {
client_request += "!g"
}
client_request += "|" + common.MESSAGE_ENDER + common.MESSAGE_DELIMITER
zone = global_zone
directory[global_zone].Write([]byte(client_request))
// fmt.Fprintf(directory["global"], client_request)
// _, err = bufio.NewReader(directory["global"]).Read(p)
// if err == nil {
// // fmt.Printf("%s\n", p)
// } else {
// fmt.Printf("Some error %v\n", err)
// }
// fmt.Println("Received", string(p), "g")
previous_zone = global_zone
}
directory[zone].Read(p)
// fmt.Println("Done:", string(p))
// <-signal
// fmt.Println("Signal received to start next", client_id)
// temp := (strings.Split(string( p ), "*"))[1]
// fmt.Println("Temp:", temp)
// latency_time, _ := strconv.ParseFloat( strings.Split(temp, "|")[0], 64)
// if err == nil && latency_time > 0 {
// // difference := end.Sub(start)
// // total_time := difference.Seconds()
// lock_mutex.Lock()
// l.times = append(l.times, latency_time)
// lock_mutex.Unlock()
// summation_ch <-latency_time
// } else {
// fmt.Println("Failure on", client_request, latency_time)
// }
}
// lock_mutex.Lock()
// l.client_start = client_starttime
// lock_mutex.Unlock()
// ch <- l
for _, v := range directory {
v.Close()
}
// directory["local"].Close()
done_ch <- true
}
type FinalResult struct {
total_latencies float64
num_successes int
earliest int
latest int
}
func | (num_t int, ch chan Latencies, exit chan FinalResult) {
total := 0.0
num_successes := 0
earliest := 0
latest := 0
var nums[1000000] float64
var newval Latencies
for i := 0; i < num_t; i++ {
newval = <- ch
// fmt.Println(i)
if newval.time > 0 {
total += newval.time
num_successes++
val, _ := strconv.Atoi(newval.start)
if earliest == 0 || val < earliest {
earliest = val
}
val, _ = strconv.Atoi(newval.end)
if val > latest {
latest = val
}
}
nums[i] = newval.time
}
// Calculate standard deviation
var sd float64
mean := total / float64(num_successes)
for j := 0; j < num_t; j++ {
if nums[j] > 0 {
sd += math.Pow( nums[j] - mean, 2 )
}
}
sd = math.Sqrt(sd / float64(num_successes))
fmt.Println("Mean:", mean)
fmt.Println("StdDev:", sd)
exit <- FinalResult {
total_latencies: total,
num_successes: num_successes,
earliest: earliest,
latest: latest,
}
}
func main() {
num_c := 10
num_t := 10
zone := "0"
client_id_seed := 0
// percent := 0.5
ip_addr := "127.0.0.1"
port := 8000
argsWithoutProg := os.Args[1:]
for i, s := range argsWithoutProg {
switch s {
case "-a":
ip_addr = argsWithoutProg[i + 1]
case "-p":
new_p, err := strconv.Atoi(argsWithoutProg[i + 1])
if err == nil {
port = new_p
}
}
}
p := make([]byte, 2049)
addr := net.UDPAddr{
Port: port,
IP: net.ParseIP(ip_addr),
}
serudp, err := net.ListenUDP("udp", &addr)
if | summation | identifier_name |
client.go | _, value := range strings.Split(strings.TrimSpace(received_data), common.MESSAGE_DELIMITER) {
// // fmt.Println("parse received:", value)
// // if len(value) > 0 && isValidString(value) {
// // // Check if the end of the message is "end." Otherwise this is a partial message and you must wait for the rest
// // if value[len(value)-1:] == common.MESSAGE_ENDER {
// // output <- message + value
// // message = ""
// // } else {
// // message = message + value
// // }
// // }
// // }
// }
// }
// input := make(chan string, 1000)
// tunnel := make(chan string, 1000)
// go handleMessage(input, tunnel, signal)
// go parseMessage(input, result, signal)
for {
p := make([]byte, 55)
_, err := c.Read(p)
if err == nil {
signal <-true
// input <- string(p)
}
// temp := (strings.Split(string( p ), "*"))[1]
// fmt.Println("Temp:", temp)
// latency_time, _ := strconv.ParseFloat( strings.Split(temp, "|")[0], 64)
}
}
func client_thread(client_id string, zone string, num_t int, txns []string, summation_ch chan Latencies, start_signal <-chan bool, done_ch chan<- bool) {
var addresses []Address
file2, _ := ioutil.ReadFile("testing/primaries.json")
_ = json.Unmarshal([]byte(file2), &addresses)
// Make a map to use either your zone primary or primary 0
directory := make(map[string]net.Conn)
// signal := make(chan bool)
for j := 0; j < len(addresses); j++ {
// if addresses[j].Zone == zone {
// conn2, err := net.Dial("tcp", addresses[j].Ip + ":" + addresses[j].Port)
// if err != nil {
// fmt.Println(err)
// return
// }
// directory["local"] = conn2
// p := make([]byte, 1024)
// _, err = conn2.Read(p)
// // fmt.Println("Received:", string(p))
// go handleConnection(conn2, summation_ch, signal)
// } else {
conn2, err := net.Dial("tcp", addresses[j].Ip + ":" + addresses[j].Port)
if err != nil {
fmt.Println(err)
return
}
directory[ addresses[j].Zone ] = conn2
p := make([]byte, 1024)
_, err = conn2.Read(p)
// fmt.Println("Received:", string(p))
// go handleConnection(conn2, summation_ch, signal)
// }
}
// Read the start signal
<-start_signal
// for all_start {}
// fmt.Println("Got signal, starting now")
if num_t != len(txns) {
fmt.Println("num_t and txns not the same", num_t, txns)
}
// client_starttime := time.Now()
previous_zone := "-1"
p := make([]byte, 1024)
for i := 0; i < num_t; i++ {
// p := make([]byte, 512)
i_str := strconv.Itoa(i)
txn_type := txns[i][0:1]
client_request := common.MESSAGE_DELIMITER + "CLIENT_REQUEST|" + client_id + "!" + i_str + "!10"
// start := time.Now()
// fmt.Println("Starting :" + client_id + "!" + i_str + "!10")
// start := time.Now())
if txn_type == "l" {
client_request += "!l|" + common.MESSAGE_ENDER + common.MESSAGE_DELIMITER
// directory["local"].Write([]byte(client_request))
directory[zone].Write([]byte(client_request))
// fmt.Fprintf(directory["local"], client_request)
// _, err = bufio.NewReader(directory["local"]).Read(p)
// if err == nil {
// // fmt.Printf("%s\n", p)
// } else {
// fmt.Printf("Some error %v\n", err)
// }
// fmt.Println("Received", string(p), "l")
previous_zone = zone
} else {
global_zone := txn_type
if previous_zone != global_zone {
// A leader election is needed
client_request += "!G"
} else {
client_request += "!g"
}
client_request += "|" + common.MESSAGE_ENDER + common.MESSAGE_DELIMITER
zone = global_zone
directory[global_zone].Write([]byte(client_request))
// fmt.Fprintf(directory["global"], client_request)
// _, err = bufio.NewReader(directory["global"]).Read(p)
// if err == nil {
// // fmt.Printf("%s\n", p)
// } else {
// fmt.Printf("Some error %v\n", err)
// }
// fmt.Println("Received", string(p), "g")
previous_zone = global_zone
}
directory[zone].Read(p)
// fmt.Println("Done:", string(p))
// <-signal
// fmt.Println("Signal received to start next", client_id)
// temp := (strings.Split(string( p ), "*"))[1]
// fmt.Println("Temp:", temp)
// latency_time, _ := strconv.ParseFloat( strings.Split(temp, "|")[0], 64)
// if err == nil && latency_time > 0 {
// // difference := end.Sub(start)
// // total_time := difference.Seconds()
// lock_mutex.Lock()
// l.times = append(l.times, latency_time)
// lock_mutex.Unlock()
// summation_ch <-latency_time
// } else {
// fmt.Println("Failure on", client_request, latency_time)
// }
}
// lock_mutex.Lock()
// l.client_start = client_starttime
// lock_mutex.Unlock()
// ch <- l
for _, v := range directory {
v.Close()
}
// directory["local"].Close()
done_ch <- true
}
type FinalResult struct {
total_latencies float64
num_successes int
earliest int
latest int
}
func summation(num_t int, ch chan Latencies, exit chan FinalResult) {
total := 0.0
num_successes := 0
earliest := 0
latest := 0
var nums[1000000] float64
var newval Latencies
for i := 0; i < num_t; i++ |
// Calculate standard deviation
var sd float64
mean := total / float64(num_successes)
for j := 0; j < num_t; j++ {
if nums[j] > 0 {
sd += math.Pow( nums[j] - mean, 2 )
}
}
sd = math.Sqrt(sd / float64(num_successes))
fmt.Println("Mean:", mean)
fmt.Println("StdDev:", sd)
exit <- FinalResult {
total_latencies: total,
num_successes: num_successes,
earliest: earliest,
latest: latest,
}
}
func main() {
num_c := 10
num_t := 10
zone := "0"
client_id_seed := 0
// percent := 0.5
ip_addr := "127.0.0.1"
port := 8000
argsWithoutProg := os.Args[1:]
for i, s := range argsWithoutProg {
switch s {
case "-a":
ip_addr = argsWithoutProg[i + 1]
case "-p":
new_p, err := strconv.Atoi(argsWithoutProg[i + 1])
if err == nil {
port = new_p
}
}
}
p := make([]byte, 2049)
addr := net.UDPAddr{
Port: port,
IP: net.ParseIP(ip_addr),
}
serudp, err := net.ListenUDP("udp", &addr)
if | {
newval = <- ch
// fmt.Println(i)
if newval.time > 0 {
total += newval.time
num_successes++
val, _ := strconv.Atoi(newval.start)
if earliest == 0 || val < earliest {
earliest = val
}
val, _ = strconv.Atoi(newval.end)
if val > latest {
latest = val
}
}
nums[i] = newval.time
} | conditional_block |
client.go | for _, value := range strings.Split(strings.TrimSpace(received_data), common.MESSAGE_DELIMITER) {
// // fmt.Println("parse received:", value)
// // if len(value) > 0 && isValidString(value) {
// // // Check if the end of the message is "end." Otherwise this is a partial message and you must wait for the rest
// // if value[len(value)-1:] == common.MESSAGE_ENDER {
// // output <- message + value
// // message = ""
// // } else {
// // message = message + value
// // }
// // }
// // }
// }
// }
// input := make(chan string, 1000)
// tunnel := make(chan string, 1000)
// go handleMessage(input, tunnel, signal)
// go parseMessage(input, result, signal)
for {
p := make([]byte, 55)
_, err := c.Read(p)
if err == nil {
signal <-true
// input <- string(p)
}
// temp := (strings.Split(string( p ), "*"))[1]
// fmt.Println("Temp:", temp)
// latency_time, _ := strconv.ParseFloat( strings.Split(temp, "|")[0], 64)
}
}
func client_thread(client_id string, zone string, num_t int, txns []string, summation_ch chan Latencies, start_signal <-chan bool, done_ch chan<- bool) {
var addresses []Address
file2, _ := ioutil.ReadFile("testing/primaries.json")
_ = json.Unmarshal([]byte(file2), &addresses)
// Make a map to use either your zone primary or primary 0
directory := make(map[string]net.Conn)
// signal := make(chan bool)
for j := 0; j < len(addresses); j++ {
// if addresses[j].Zone == zone {
// conn2, err := net.Dial("tcp", addresses[j].Ip + ":" + addresses[j].Port)
// if err != nil {
// fmt.Println(err)
// return
// }
// directory["local"] = conn2
// p := make([]byte, 1024)
// _, err = conn2.Read(p)
// // fmt.Println("Received:", string(p))
// go handleConnection(conn2, summation_ch, signal)
// } else {
conn2, err := net.Dial("tcp", addresses[j].Ip + ":" + addresses[j].Port)
if err != nil {
fmt.Println(err)
return
}
directory[ addresses[j].Zone ] = conn2
p := make([]byte, 1024)
_, err = conn2.Read(p)
// fmt.Println("Received:", string(p))
// go handleConnection(conn2, summation_ch, signal)
// }
}
// Read the start signal
<-start_signal
// for all_start {}
// fmt.Println("Got signal, starting now")
if num_t != len(txns) {
fmt.Println("num_t and txns not the same", num_t, txns)
}
// client_starttime := time.Now()
previous_zone := "-1"
p := make([]byte, 1024)
for i := 0; i < num_t; i++ {
// p := make([]byte, 512)
i_str := strconv.Itoa(i)
txn_type := txns[i][0:1]
client_request := common.MESSAGE_DELIMITER + "CLIENT_REQUEST|" + client_id + "!" + i_str + "!10"
// start := time.Now()
// fmt.Println("Starting :" + client_id + "!" + i_str + "!10")
// start := time.Now())
if txn_type == "l" {
client_request += "!l|" + common.MESSAGE_ENDER + common.MESSAGE_DELIMITER
// directory["local"].Write([]byte(client_request))
directory[zone].Write([]byte(client_request))
// fmt.Fprintf(directory["local"], client_request)
// _, err = bufio.NewReader(directory["local"]).Read(p)
// if err == nil {
// // fmt.Printf("%s\n", p)
// } else {
// fmt.Printf("Some error %v\n", err)
// }
// fmt.Println("Received", string(p), "l")
previous_zone = zone
} else {
global_zone := txn_type
if previous_zone != global_zone {
// A leader election is needed
client_request += "!G"
} else {
client_request += "!g"
}
client_request += "|" + common.MESSAGE_ENDER + common.MESSAGE_DELIMITER
zone = global_zone
directory[global_zone].Write([]byte(client_request))
// fmt.Fprintf(directory["global"], client_request)
// _, err = bufio.NewReader(directory["global"]).Read(p)
// if err == nil {
// // fmt.Printf("%s\n", p)
// } else {
// fmt.Printf("Some error %v\n", err)
// }
// fmt.Println("Received", string(p), "g")
previous_zone = global_zone
}
directory[zone].Read(p)
// fmt.Println("Done:", string(p))
// <-signal
// fmt.Println("Signal received to start next", client_id)
// temp := (strings.Split(string( p ), "*"))[1]
// fmt.Println("Temp:", temp)
// latency_time, _ := strconv.ParseFloat( strings.Split(temp, "|")[0], 64)
// if err == nil && latency_time > 0 {
// // difference := end.Sub(start)
// // total_time := difference.Seconds()
// lock_mutex.Lock()
// l.times = append(l.times, latency_time)
// lock_mutex.Unlock()
// summation_ch <-latency_time
// } else {
// fmt.Println("Failure on", client_request, latency_time)
// }
}
// lock_mutex.Lock()
// l.client_start = client_starttime
// lock_mutex.Unlock()
// ch <- l
for _, v := range directory {
v.Close()
}
// directory["local"].Close()
done_ch <- true
}
type FinalResult struct {
total_latencies float64
num_successes int
earliest int
latest int
}
func summation(num_t int, ch chan Latencies, exit chan FinalResult) | latest = val
}
}
nums[i] = newval.time
}
// Calculate standard deviation
var sd float64
mean := total / float64(num_successes)
for j := 0; j < num_t; j++ {
if nums[j] > 0 {
sd += math.Pow( nums[j] - mean, 2 )
}
}
sd = math.Sqrt(sd / float64(num_successes))
fmt.Println("Mean:", mean)
fmt.Println("StdDev:", sd)
exit <- FinalResult {
total_latencies: total,
num_successes: num_successes,
earliest: earliest,
latest: latest,
}
}
func main() {
num_c := 10
num_t := 10
zone := "0"
client_id_seed := 0
// percent := 0.5
ip_addr := "127.0.0.1"
port := 8000
argsWithoutProg := os.Args[1:]
for i, s := range argsWithoutProg {
switch s {
case "-a":
ip_addr = argsWithoutProg[i + 1]
case "-p":
new_p, err := strconv.Atoi(argsWithoutProg[i + 1])
if err == nil {
port = new_p
}
}
}
p := make([]byte, 2049)
addr := net.UDPAddr{
Port: port,
IP: net.ParseIP(ip_addr),
}
serudp, err := net.ListenUDP("udp", &addr)
if | {
total := 0.0
num_successes := 0
earliest := 0
latest := 0
var nums[1000000] float64
var newval Latencies
for i := 0; i < num_t; i++ {
newval = <- ch
// fmt.Println(i)
if newval.time > 0 {
total += newval.time
num_successes++
val, _ := strconv.Atoi(newval.start)
if earliest == 0 || val < earliest {
earliest = val
}
val, _ = strconv.Atoi(newval.end)
if val > latest { | identifier_body |
client.go | for _, value := range strings.Split(strings.TrimSpace(received_data), common.MESSAGE_DELIMITER) {
// // fmt.Println("parse received:", value)
// // if len(value) > 0 && isValidString(value) {
// // // Check if the end of the message is "end." Otherwise this is a partial message and you must wait for the rest
// // if value[len(value)-1:] == common.MESSAGE_ENDER {
// // output <- message + value
// // message = ""
// // } else {
// // message = message + value
// // }
// // }
// // }
// }
// }
// input := make(chan string, 1000)
// tunnel := make(chan string, 1000)
// go handleMessage(input, tunnel, signal)
// go parseMessage(input, result, signal)
for {
p := make([]byte, 55)
_, err := c.Read(p)
if err == nil {
signal <-true
// input <- string(p)
}
// temp := (strings.Split(string( p ), "*"))[1]
// fmt.Println("Temp:", temp)
// latency_time, _ := strconv.ParseFloat( strings.Split(temp, "|")[0], 64)
}
}
func client_thread(client_id string, zone string, num_t int, txns []string, summation_ch chan Latencies, start_signal <-chan bool, done_ch chan<- bool) {
var addresses []Address
file2, _ := ioutil.ReadFile("testing/primaries.json")
_ = json.Unmarshal([]byte(file2), &addresses)
// Make a map to use either your zone primary or primary 0
directory := make(map[string]net.Conn)
// signal := make(chan bool)
for j := 0; j < len(addresses); j++ {
// if addresses[j].Zone == zone {
// conn2, err := net.Dial("tcp", addresses[j].Ip + ":" + addresses[j].Port)
// if err != nil {
// fmt.Println(err)
// return
// }
// directory["local"] = conn2
// p := make([]byte, 1024)
// _, err = conn2.Read(p)
// // fmt.Println("Received:", string(p))
// go handleConnection(conn2, summation_ch, signal)
// } else {
conn2, err := net.Dial("tcp", addresses[j].Ip + ":" + addresses[j].Port)
if err != nil {
fmt.Println(err)
return
}
directory[ addresses[j].Zone ] = conn2
p := make([]byte, 1024)
_, err = conn2.Read(p)
// fmt.Println("Received:", string(p))
// go handleConnection(conn2, summation_ch, signal)
// }
}
// Read the start signal
<-start_signal
// for all_start {}
// fmt.Println("Got signal, starting now")
if num_t != len(txns) {
fmt.Println("num_t and txns not the same", num_t, txns)
}
// client_starttime := time.Now()
previous_zone := "-1"
p := make([]byte, 1024)
for i := 0; i < num_t; i++ {
// p := make([]byte, 512)
i_str := strconv.Itoa(i)
txn_type := txns[i][0:1] | client_request := common.MESSAGE_DELIMITER + "CLIENT_REQUEST|" + client_id + "!" + i_str + "!10"
// start := time.Now()
// fmt.Println("Starting :" + client_id + "!" + i_str + "!10")
// start := time.Now())
if txn_type == "l" {
client_request += "!l|" + common.MESSAGE_ENDER + common.MESSAGE_DELIMITER
// directory["local"].Write([]byte(client_request))
directory[zone].Write([]byte(client_request))
// fmt.Fprintf(directory["local"], client_request)
// _, err = bufio.NewReader(directory["local"]).Read(p)
// if err == nil {
// // fmt.Printf("%s\n", p)
// } else {
// fmt.Printf("Some error %v\n", err)
// }
// fmt.Println("Received", string(p), "l")
previous_zone = zone
} else {
global_zone := txn_type
if previous_zone != global_zone {
// A leader election is needed
client_request += "!G"
} else {
client_request += "!g"
}
client_request += "|" + common.MESSAGE_ENDER + common.MESSAGE_DELIMITER
zone = global_zone
directory[global_zone].Write([]byte(client_request))
// fmt.Fprintf(directory["global"], client_request)
// _, err = bufio.NewReader(directory["global"]).Read(p)
// if err == nil {
// // fmt.Printf("%s\n", p)
// } else {
// fmt.Printf("Some error %v\n", err)
// }
// fmt.Println("Received", string(p), "g")
previous_zone = global_zone
}
directory[zone].Read(p)
// fmt.Println("Done:", string(p))
// <-signal
// fmt.Println("Signal received to start next", client_id)
// temp := (strings.Split(string( p ), "*"))[1]
// fmt.Println("Temp:", temp)
// latency_time, _ := strconv.ParseFloat( strings.Split(temp, "|")[0], 64)
// if err == nil && latency_time > 0 {
// // difference := end.Sub(start)
// // total_time := difference.Seconds()
// lock_mutex.Lock()
// l.times = append(l.times, latency_time)
// lock_mutex.Unlock()
// summation_ch <-latency_time
// } else {
// fmt.Println("Failure on", client_request, latency_time)
// }
}
// lock_mutex.Lock()
// l.client_start = client_starttime
// lock_mutex.Unlock()
// ch <- l
for _, v := range directory {
v.Close()
}
// directory["local"].Close()
done_ch <- true
}
type FinalResult struct {
total_latencies float64
num_successes int
earliest int
latest int
}
func summation(num_t int, ch chan Latencies, exit chan FinalResult) {
total := 0.0
num_successes := 0
earliest := 0
latest := 0
var nums[1000000] float64
var newval Latencies
for i := 0; i < num_t; i++ {
newval = <- ch
// fmt.Println(i)
if newval.time > 0 {
total += newval.time
num_successes++
val, _ := strconv.Atoi(newval.start)
if earliest == 0 || val < earliest {
earliest = val
}
val, _ = strconv.Atoi(newval.end)
if val > latest {
latest = val
}
}
nums[i] = newval.time
}
// Calculate standard deviation
var sd float64
mean := total / float64(num_successes)
for j := 0; j < num_t; j++ {
if nums[j] > 0 {
sd += math.Pow( nums[j] - mean, 2 )
}
}
sd = math.Sqrt(sd / float64(num_successes))
fmt.Println("Mean:", mean)
fmt.Println("StdDev:", sd)
exit <- FinalResult {
total_latencies: total,
num_successes: num_successes,
earliest: earliest,
latest: latest,
}
}
func main() {
num_c := 10
num_t := 10
zone := "0"
client_id_seed := 0
// percent := 0.5
ip_addr := "127.0.0.1"
port := 8000
argsWithoutProg := os.Args[1:]
for i, s := range argsWithoutProg {
switch s {
case "-a":
ip_addr = argsWithoutProg[i + 1]
case "-p":
new_p, err := strconv.Atoi(argsWithoutProg[i + 1])
if err == nil {
port = new_p
}
}
}
p := make([]byte, 2049)
addr := net.UDPAddr{
Port: port,
IP: net.ParseIP(ip_addr),
}
serudp, err := net.ListenUDP("udp", &addr)
if err | random_line_split |
|
shopee.js | ) {
if (accountType === 'subaccount') {
// Subaccount login needs SPC_CDS, otherwise it may fail
jar.setCookieSync(cookie2str({
name: 'SPC_CDS',
value: SPC_CDS,
domain: host,
path: '/'
}, host), `https://${host}/`)
}
// Seed the cookies so that native (local) shops keep using their original ones
cookies.forEach(item => {
let domain = item.domain
if (('.' + host).includes(domain)) {
// Submitting SPC_SC_UD makes the login fail with a 400 error
// if (keyCookieNames.indexOf(item.name) === -1 && item.value) {
if (item.name === 'SPC_F') {
jar.setCookieSync(cookie2str(item, host), `https://${host}/`)
}
} else {
// e.g. cookie info under .shopee.com used for main/sub-account login
if (domain.match(/shopee\.com$/)) {
subaccountCookies.push(item)
jar.setCookieSync(cookie2str(item, 'seller.shopee.com'), 'https://account.seller.shopee.com/')
}
}
})
}
const getCookie = () => {
return new Promise((resolve, reject) => {
jar.getCookies(`https://${host}/`, (err, cookies) => {
if (err) {
reject(err)
} else {
let names = []
let list = []
cookies.forEach(item => {
if (!item.value) {
// Skip the entry when it has no value
return
}
let domain = item.domain || host
if (domain && domain.startsWith('shopee')) {
domain = '.' + domain
}
let ret = {
name: item.key,
value: item.value || '',
domain,
path: item.path,
secure: item.secure,
httpOnly: item.httpOnly,
hostOnly: item.hostOnly,
}
if (item.expires) {
ret.expirationDate = (new Date(item.expires)).getTime() / 1000
} else {
ret.session = true
}
let index = names.indexOf(ret.name)
if (index !== -1) {
// There are two SPC_SC_TK cookies: the one under the seller.my.shopee.cn domain is empty,
// while the one under my.shopee.cn carries the value
if (!list[index].value) {
list.splice(index, 1)
}
}
names.push(ret.name)
list.push(ret)
})
resolve(list.concat(subaccountCookies))
}
})
})
}
const getFingerprint = () => {
return new Promise((resolve, reject) => {
if (accountType === 'subaccount') {
http.get('https://account.seller.shopee.com/api/get_fingerprint/').then(({ data: res }) => {
resolve(res.fingerprint)
}, reject)
} else {
resolve()
}
})
}
const loginSig = (shopId) => {
return new Promise((resolve, reject) => {
if (accountType === 'subaccount') {
console.log('getSIG', shopId)
// A 403 on this request usually means a cookie is missing
http.get(`https://${host}/api/selleraccount/subaccount/get_sig/`, {
params: {
SPC_CDS,
SPC_CDS_VER: 2,
target_shop_id: shopId
}
}).then(({ data: res }) => {
if (res.code === 0) {
let sigs = res.url.match(/sig=([^&]+)/)
if (sigs) {
// let sig = decodeURIComponent(sigs[1])
console.log('sig', decodeURIComponent(sigs[1]))
http.post(`https://${host}/api/v2/login/`, `sig=${sigs[1]}`, {
headers: {
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
referer: res.url
}
}).then(({ data: res2 }) => {
/*
{
"username": "xin996.my",
"shopid": 195533672,
"phone": "0000432692937569",
"sso": null,
"cs_token": null,
"portrait": "4e816700547c6cdcffb87d41fd0f259b",
"id": 195536606,
"errcode": 0,
"token": "sa_baaaff47f2f3af36250a223f3b4dd87f",
"subaccount_id": 387498,
"sub_account_token": "Nx0gl0ysUK8Hzq8LNHpiBWV8bmUG/gI86so+9biXTyD2bu8eXmkregjUmVxM0DpL",
"email": "[email protected]"
}
*/
if (res2.errcode === 0) {
console.log('siglogin: ok')
resolve(res2)
} else {
reject(new Error(`siglogin: ${res2.errcode}, ${res2.errmsg || res2.message}`))
}
}, reject)
| reject(new Error(`sigget: ${res.code}, ${res.message}`))
}
}, reject)
} else {
resolve()
}
})
}
return new Promise((resolve, reject) => {
getFingerprint().then(fingerprint => {
let captcha_key = createCaptcheKey()
// The password is hashed with MD5 first, then SHA256
if (accountType !== 'subaccount') {
// Username, email and phone logins need this extra hashing step;
// subaccount login only needs the MD5
password_hash = cryptojs.SHA256(password_hash).toString()
if (accountType === 'phone') {
username = transformPhone(username, country)
}
}
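// Illustrative sketch (assumption, not taken from the original code): judging by the comments
// above, the caller is expected to pass `password_hash` already MD5-hashed, so the effective
// chain for a main-account login is roughly
//   cryptojs.SHA256(cryptojs.MD5(plainPassword).toString()).toString()
// while a subaccount login stops at the MD5 step.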
let data = `captcha=&captcha_key=${captcha_key}&remember=false&password_hash=${password_hash}&${accountType}=${username}`
if (accountType === 'subaccount') {
data += `&fingerprint=${fingerprint}`
}
console.log('post', data)
http.post(`https://${host}/api/v2/login/`, data).then(({ data: user }) => {
/*
Subaccount login response:
{
username: 'ahynhhblc:suxiaoyi',
account_type: 'sub_merchant',
shopid: 195533672,
phone: '+8615058100276',
sso: 'wc+t2ht6DLsjQm7v3C9EARMQ6oIpFjj0iHofZSXgyb7D75kxYuU+hBX+SgOQt2if',
portrait: '',
id: 0,
nick_name: 'sxytest',
main_account_id: 250327,
errcode: 0,
token: 'bd83f479e53cc0b5d4161462580ff680',
subaccount_id: 387498,
email: ''
}
*/
if (user.errcode !== 0) {
return reject(user.errcode)
}
let shop = {
user,
host,
shopId: user.shopid,
shopName: user.username,
userId: user.id,
}
if (accountType === 'subaccount') {
shop.userId = user.subaccount_id || user.main_account_id
}
console.log('login.okkk', user)
loginSig(shop.shopId).then(() => {
http.post(`https://${host}/webchat/api/v1.2/login`).then(({ data: res }) => {
console.log(res)
shop.uid = res.user.uid
shop.token = res.token
shop.socketToken = res.p_token
shop.status = res.user.status || 'normal'
shop.country = (res.user.country || country).toLowerCase()
getCookie().then(cookies2 => {
shop.cookies = cookies2
resolve(shop)
}, reject)
}, reject)
}, reject)
}, reject)
}, reject)
})
},
// cookies [{name, value, key}]
authorize({ host, username, password, device_id, ip, cookies }, agent) {
const jar = new tough.CookieJar(null, {
ignoreError: true
})
const http = axios.create({
timeout: 50e3,
withCredentials: true,
// WARNING: This value will be ignored.
jar,
})
const headers = {
'origin': `https://${host}`,
'referer': `https://${host}/webchat/login`,
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 | } else {
reject(new Error('failed to fetch sig'))
}
} else {
| conditional_block |
shopee.js | = (username || '').trim()
const accountType = getLoginType(username)
const sellerCenterFeSessionHash = uuidv4()
const SPC_CDS = uuidv4()
const headers = {
'origin': `https://${host}`,
'referer': `https://${host}/account/signin`,
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',
'sc-fe-session': sellerCenterFeSessionHash,
'sc-fe-ver': 9594,
'x-forwarded-proto': 'https',
'x-forwarded-port': '443',
'x-forwarded-for': ip
}
if (tunnel) {
headers['x-timestamp'] = tunnel
}
const jar = new tough.CookieJar(null, {
ignoreError: true
})
// Main/sub-account cookies [domain=.shopee.com]
const subaccountCookies = []
const http = axios.create({
timeout: 50e3,
withCredentials: true,
// WARNING: This value will be ignored.
jar,
headers
})
// Set directly after wrapping instance.
axiosCookieJarSupport(http)
http.defaults.jar = jar
if (agent) {
http.defaults.httpAgent = agent
http.defaults.httpsAgent = agent
}
if (cookies) {
if (accountType === 'subaccount') {
// Subaccount login needs SPC_CDS, otherwise it may fail
jar.setCookieSync(cookie2str({
name: 'SPC_CDS',
value: SPC_CDS,
domain: host,
path: '/'
}, host), `https://${host}/`)
}
// Seed the cookies so that native (local) shops keep using their original ones
cookies.forEach(item => {
let domain = item.domain
if (('.' + host).includes(domain)) {
// Submitting SPC_SC_UD makes the login fail with a 400 error
// if (keyCookieNames.indexOf(item.name) === -1 && item.value) {
if (item.name === 'SPC_F') {
jar.setCookieSync(cookie2str(item, host), `https://${host}/`)
}
} else {
// e.g. cookie info under .shopee.com used for main/sub-account login
if (domain.match(/shopee\.com$/)) {
subaccountCookies.push(item)
jar.setCookieSync(cookie2str(item, 'seller.shopee.com'), 'https://account.seller.shopee.com/')
}
}
})
}
const getCookie = () => {
return new Promise((resolve, reject) => {
jar.getCookies(`https://${host}/`, (err, cookies) => {
if (err) {
reject(err)
} else {
let names = []
let list = []
cookies.forEach(item => {
if (!item.value) {
// Skip the entry when it has no value
return
}
let domain = item.domain || host
if (domain && domain.startsWith('shopee')) {
domain = '.' + domain
}
let ret = {
name: item.key,
value: item.value || '',
domain,
path: item.path,
secure: item.secure,
httpOnly: item.httpOnly,
hostOnly: item.hostOnly,
}
if (item.expires) {
ret.expirationDate = (new Date(item.expires)).getTime() / 1000
} else {
ret.session = true
}
let index = names.indexOf(ret.name)
if (index !== -1) {
// There are two SPC_SC_TK cookies: the one under the seller.my.shopee.cn domain is empty,
// while the one under my.shopee.cn carries the value
if (!list[index].value) {
list.splice(index, 1)
}
}
names.push(ret.name)
list.push(ret)
})
resolve(list.concat(subaccountCookies))
}
})
})
}
const getFingerprint = () => {
return new Promise((resolve, reject) => {
if (accountType === 'subaccount') {
http.get('https://account.seller.shopee.com/api/get_fingerprint/').then(({ data: res }) => {
resolve(res.fingerprint)
}, reject)
} else {
resolve()
}
})
}
const loginSig = (shopId) => {
return new Promise((resolve, reject) => {
if (accountType === 'subaccount') {
console.log('getSIG', shopId)
// A 403 on this request usually means a cookie is missing
http.get(`https://${host}/api/selleraccount/subaccount/get_sig/`, {
params: {
SPC_CDS,
SPC_CDS_VER: 2,
target_shop_id: shopId
}
}).then(({ data: res }) => {
if (res.code === 0) {
let sigs = res.url.match(/sig=([^&]+)/)
if (sigs) {
// let sig = decodeURIComponent(sigs[1])
console.log('sig', decodeURIComponent(sigs[1]))
http.post(`https://${host}/api/v2/login/`, `sig=${sigs[1]}`, {
headers: {
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
referer: res.url
}
}).then(({ data: res2 }) => {
/*
{
"username": "xin996.my",
"shopid": 195533672,
"phone": "0000432692937569",
"sso": null,
"cs_token": null,
"portrait": "4e816700547c6cdcffb87d41fd0f259b",
"id": 195536606,
"errcode": 0,
"token": "sa_baaaff47f2f3af36250a223f3b4dd87f",
"subaccount_id": 387498,
"sub_account_token": "Nx0gl0ysUK8Hzq8LNHpiBWV8bmUG/gI86so+9biXTyD2bu8eXmkregjUmVxM0DpL",
"email": "[email protected]"
}
*/
if (res2.errcode === 0) {
console.log('siglogin: ok')
resolve(res2)
} else {
reject(new Error(`siglogin: ${res2.errcode}, ${res2.errmsg || res2.message}`))
}
}, reject)
} else {
reject(new Error('failed to fetch sig'))
}
} else {
reject(new Error(`sigget: ${res.code}, ${res.message}`))
}
}, reject)
} else {
resolve()
}
})
}
return new Promise((resolve, reject) => {
getFingerprint().then(fingerprint => {
let captcha_key = createCaptcheKey()
// The password is hashed with MD5 first, then SHA256
if (accountType !== 'subaccount') {
// Username, email and phone logins need this extra hashing step;
// subaccount login only needs the MD5
password_hash = cryptojs.SHA256(password_hash).toString()
if (accountType === 'phone') {
username = transformPhone(username, country)
}
}
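// Illustrative sketch (assumption, not taken from the original code): judging by the comments
// above, the caller is expected to pass `password_hash` already MD5-hashed, so the effective
// chain for a main-account login is roughly
//   cryptojs.SHA256(cryptojs.MD5(plainPassword).toString()).toString()
// while a subaccount login stops at the MD5 step.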
let data = `captcha=&captcha_key=${captcha_key}&remember=false&password_hash=${password_hash}&${accountType}=${username}`
if (accountType === 'subaccount') {
data += `&fingerprint=${fingerprint}`
}
console.log('post', data)
http.post(`https://${host}/api/v2/login/`, data).then(({ data: user }) => {
/*
Subaccount login response:
{
username: 'ahynhhblc:suxiaoyi',
account_type: 'sub_merchant',
shopid: 195533672,
phone: '+8615058100276',
sso: 'wc+t2ht6DLsjQm7v3C9EARMQ6oIpFjj0iHofZSXgyb7D75kxYuU+hBX+SgOQt2if',
portrait: '',
id: 0,
nick_name: 'sxytest',
main_account_id: 250327,
errcode: 0,
token: 'bd83f479e53cc0b5d4161462580ff680',
subaccount_id: 387498,
email: ''
}
*/
if (user.errcode !== 0) {
return reject(user.errcode)
}
let shop = {
user,
host,
shop | ername | identifier_name |
|
shopee.js | const subaccountCookies = []
const http = axios.create({
timeout: 50e3,
withCredentials: true,
// WARNING: This value will be ignored.
jar,
headers
})
// Set directly after wrapping instance.
axiosCookieJarSupport(http)
http.defaults.jar = jar
if (agent) {
http.defaults.httpAgent = agent
http.defaults.httpsAgent = agent
}
if (cookies) {
if (accountType === 'subaccount') {
// Subaccount login requires SPC_CDS, otherwise it may fail
jar.setCookieSync(cookie2str({
name: 'SPC_CDS',
value: SPC_CDS,
domain: host,
path: '/'
}, host), `https://${host}/`)
}
// Plant cookies so that local shops keep using their original ones
cookies.forEach(item => {
let domain = item.domain
if (('.' + host).includes(domain)) {
// Submitting SPC_SC_UD causes login to fail with a 400 error
// if (keyCookieNames.indexOf(item.name) === -1 && item.value) {
if (item.name === 'SPC_F') {
jar.setCookieSync(cookie2str(item, host), `https://${host}/`)
}
} else {
// e.g. .shopee.com cookies used for main/sub account login
if (domain.match(/shopee\.com$/)) {
subaccountCookies.push(item)
jar.setCookieSync(cookie2str(item, 'seller.shopee.com'), 'https://account.seller.shopee.com/')
}
}
})
}
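// Cookie routing summary (descriptive only): cookies whose domain matches the shop host are re-planted on
// https://${host}/ (only SPC_F, to avoid the 400 error mentioned above), while *.shopee.com cookies are
// collected into subaccountCookies and planted on https://account.seller.shopee.com/ for the main/sub
// account flow. getCookie() below later merges subaccountCookies back into the returned list.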
const getCookie = () => {
return new Promise((resolve, reject) => {
jar.getCookies(`https://${host}/`, (err, cookies) => {
if (err) {
reject(err)
} else {
let names = []
let list = []
cookies.forEach(item => {
if (!item.value) {
// Return early when the value is missing
return
}
let domain = item.domain || host
if (domain && domain.startsWith('shopee')) {
domain = '.' + domain
}
let ret = {
name: item.key,
value: item.value || '',
domain,
path: item.path,
secure: item.secure,
httpOnly: item.httpOnly,
hostOnly: item.hostOnly,
}
if (item.expires) {
ret.expirationDate = (new Date(item.expires)).getTime() / 1000
} else {
ret.session = true
}
let index = names.indexOf(ret.name)
if (index !== -1) {
// There are two SPC_SC_TK cookies; the one under the seller.my.shopee.cn domain is empty.
// The one under my.shopee.cn has a value.
if (!list[index].value) {
list.splice(index, 1)
}
}
names.push(ret.name)
list.push(ret)
})
resolve(list.concat(subaccountCookies))
}
})
})
}
const getFingerprint = () => {
return new Promise((resolve, reject) => {
if (accountType === 'subaccount') {
http.get('https://account.seller.shopee.com/api/get_fingerprint/').then(({ data: res }) => {
resolve(res.fingerprint)
}, reject)
} else {
resolve()
}
})
}
const loginSig = (shopId) => {
return new Promise((resolve, reject) => {
if (accountType === 'subaccount') {
console.log('getSIG', shopId)
// A 403 on this request is probably caused by a missing cookie
http.get(`https://${host}/api/selleraccount/subaccount/get_sig/`, {
params: {
SPC_CDS,
SPC_CDS_VER: 2,
target_shop_id: shopId
}
}).then(({ data: res }) => {
if (res.code === 0) {
let sigs = res.url.match(/sig=([^&]+)/)
if (sigs) {
// let sig = decodeURIComponent(sigs[1])
console.log('sig', decodeURIComponent(sigs[1]))
http.post(`https://${host}/api/v2/login/`, `sig=${sigs[1]}`, {
headers: {
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
referer: res.url
}
}).then(({ data: res2 }) => {
/*
{
"username": "xin996.my",
"shopid": 195533672,
"phone": "0000432692937569",
"sso": null,
"cs_token": null,
"portrait": "4e816700547c6cdcffb87d41fd0f259b",
"id": 195536606,
"errcode": 0,
"token": "sa_baaaff47f2f3af36250a223f3b4dd87f",
"subaccount_id": 387498,
"sub_account_token": "Nx0gl0ysUK8Hzq8LNHpiBWV8bmUG/gI86so+9biXTyD2bu8eXmkregjUmVxM0DpL",
"email": "[email protected]"
}
*/
if (res2.errcode === 0) {
console.log('siglogin: ok')
resolve(res2)
} else {
reject(new Error(`siglogin: ${res2.errcode}, ${res2.errmsg || res2.message}`))
}
}, reject)
} else {
reject(new Error('failed to get sig'))
}
} else {
reject(new Error(`sigget: ${res.code}, ${res.message}`))
}
}, reject)
} else {
resolve()
}
})
}
return new Promise((resolve, reject) => {
getFingerprint().then(fingerprint => {
let captcha_key = createCaptcheKey()
// The password is hashed with md5 first, then sha256
if (accountType !== 'subaccount') {
// Username, email, and phone logins need the extra hash
// Subaccount login only needs md5
password_hash = cryptojs.SHA256(password_hash).toString()
if (accountType === 'phone') {
username = transformPhone(username, country)
}
}
let data = `captcha=&captcha_key=${captcha_key}&remember=false&password_hash=${password_hash}&${accountType}=${username}`
if (accountType === 'subaccount') {
data += `&fingerprint=${fingerprint}`
}
console.log('post', data)
http.post(`https://${host}/api/v2/login/`, data).then(({ data: user }) => {
/*
Subaccount response payload
{
username: 'ahynhhblc:suxiaoyi',
account_type: 'sub_merchant',
shopid: 195533672,
phone: '+8615058100276',
sso: 'wc+t2ht6DLsjQm7v3C9EARMQ6oIpFjj0iHofZSXgyb7D75kxYuU+hBX+SgOQt2if',
portrait: '',
id: 0,
nick_name: 'sxytest',
main_account_id: 250327,
errcode: 0,
token: 'bd83f479e53cc0b5d4161462580ff680',
subaccount_id: 387498,
email: ''
}
*/
if (user.errcode !== 0) {
return reject(user.errcode)
}
let shop = {
user,
host,
shopId: user.shopid,
shopName: user.username,
userId: user.id | )
const sellerCenterFeSessionHash = uuidv4()
const SPC_CDS = uuidv4()
const headers = {
'origin': `https://${host}`,
'referer': `https://${host}/account/signin`,
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',
'sc-fe-session': sellerCenterFeSessionHash,
'sc-fe-ver': 9594,
'x-forwarded-proto': 'https',
'x-forwarded-port': '443',
'x-forwarded-for': ip
}
if (tunnel) {
headers['x-timestamp'] = tunnel
}
const jar = new tough.CookieJar(null, {
ignoreError: true
})
// 子母账号cookie[domain=.shopee.com] | identifier_body |
|
shopee.js | shop.userId = user.subaccount_id || user.main_account_id
}
console.log('login.okkk', user)
loginSig(shop.shopId).then(() => {
http.post(`https://${host}/webchat/api/v1.2/login`).then(({ data: res }) => {
console.log(res)
shop.uid = res.user.uid
shop.token = res.token
shop.socketToken = res.p_token
shop.status = res.user.status || 'normal'
shop.country = (res.user.country || country).toLowerCase()
getCookie().then(cookies2 => {
shop.cookies = cookies2
resolve(shop)
}, reject)
}, reject)
}, reject)
}, reject)
}, reject)
})
},
// cookies [{name, value, key}]
authorize({ host, username, password, device_id, ip, cookies }, agent) {
const jar = new tough.CookieJar(null, {
ignoreError: true
})
const http = axios.create({
timeout: 50e3,
withCredentials: true,
// WARNING: This value will be ignored.
jar,
})
const headers = {
'origin': `https://${host}`,
'referer': `https://${host}/webchat/login`,
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',
'x-forwarded-proto': 'https',
'x-forwarded-port': '443',
'x-forwarded-for': ip
}
// Set directly after wrapping instance.
axiosCookieJarSupport(http)
http.defaults.jar = jar
if (agent) {
http.defaults.httpAgent = agent
http.defaults.httpsAgent = agent
}
if (cookies) {
// Plant cookies so that local shops keep using their original ones
cookies.forEach(item => {
// Submitting SPC_SC_UD causes login to fail with a 400 error
if (item.name !== 'SPC_SC_UD') {
jar.setCookieSync(cookie2str(item, host), `https://${host}/`)
}
})
}
const getCookie = () => {
return new Promise((resolve, reject) => {
jar.getCookies(`https://${host}/`, (err, cookies) => {
if (err) {
reject(err)
} else {
let names = []
let list = []
cookies.forEach(item => {
if (!item.value) {
// Return early when the value is missing
return
}
let ret = {
name: item.key,
value: item.value || '',
domain: item.domain,
path: item.path,
secure: item.secure,
httpOnly: item.httpOnly,
hostOnly: item.hostOnly,
}
if (item.expires) {
ret.expirationDate = (new Date(item.expires)).getTime() / 1000
} else {
ret.session = true
}
let index = names.indexOf(ret.name)
if (index !== -1) {
// There are two SPC_SC_TK cookies; the one under the seller.my.shopee.cn domain is empty.
// The one under my.shopee.cn has a value.
if (!list[index].value) {
list.splice(index, 1)
}
}
names.push(ret.name)
list.push(ret)
})
resolve(list)
}
})
})
}
return new Promise((resolve, reject) => {
// TODO: support email login as well
http.post(`https://${host}/webchat/api/v1/sessions?_v=2.8.0`, {
device_id,
username,
password
}, {
headers
}).then(({ data: res }) => {
console.log(res)
let shop = {
host,
shopId: res.user.shop_id,
shopName: res.user.username,
userId: res.user.id,
uid: res.user.uid,
token: res.token,
socketToken: res.p_token,
status: res.status
}
if (res.user.country) {
shop.country = res.user.country.toLowerCase()
}
headers.referer = `https://${host}/`
// Fetch the site's SPC_SC_TK so the site can be accessed directly
http.get(`https://${host}/api/v2/login/`, {
headers
}).then(({ data: user }) => {
console.log('get-login')
// May be needed later; unused for now
shop.user = user
if (!shop.shopId) {
// For main/sub accounts the shopId may be missing above, so add it here
shop.shopId = user.shopid
}
shop.subaccountId = user.subaccount_id
if (username.match(/:\w+$/)) {
// Main/sub account
http.get(`https://${host}/api/selleraccount/subaccount/get_sig/?target_shop_id=${user.shopid}`).then(({ data: res3 }) => {
if (res3.code === 0) {
let sigs = res3.url.match(/sig=([^&]+)/)
if (sigs) {
console.log('sig ', sigs[1])
headers['Content-Type'] = 'application/x-www-form-urlencoded'
http.post(`https://${host}/api/v2/login/`, `sig=${sigs[1]}`, {
headers
}).then(getCookie).then(cookies => {
console.log('post-login')
shop.cookies = cookies
resolve(shop)
}, reject)
}
} else {
reject(new Error(res3.message))
}
}, reject)
} else {
getCookie().then(cookies => {
shop.cookies = cookies
resolve(shop)
}, reject)
}
/*
{
"username": "egogoods",
"shopid": 41848116,
"errcode": 0,
"phone": "88613684909622",
"sso": "xTykgI2MSp0TSE9w9t43lkaXxbhz843SXVEA4B6Jz5roxg+gGpabtBd1vpTLId4yXJJUsvf/rnsjgjtysKlvf/4AnnD2mtWW2OzC6+JSzj4vy1LZpQxwJPTr0yuDz1UL+vBSlyAkXxcuituAN7kwKZkckQNs3eYVVR90auQGxLQ=",
"email": "[email protected]",
"token": "WQTtIaFgKw+dnSZ9asw9xT1axGXWHTVYtFnF1X7WoxyJV4uNEDMIUGIiah7+P3B/",
"cs_token": "xTykgI2MSp0TSE9w9t43lkaXxbhz843SXVEA4B6Jz5roxg+gGpabtBd1vpTLId4yXJJUsvf/rnsjgjtysKlvf/4AnnD2mtWW2OzC6+JSzj4vy1LZpQxwJPTr0yuDz1UL+vBSlyAkXxcuituAN7kwKZkckQNs3eYVVR90auQGxLQ=",
"portrait": "996c1bb9e48618f296dcbaf3a46eae00",
"id": 41849502,
"sub_account_token": null
}
*/
}, reject)
}, reject)
})
}
}
/*
// res响应
{
"status": "verified",
"p_token": "cH++uh7oUF8niXSvQvaOA8Xtiw0QSXLpWymwvIgfqlg=",
"token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6Im1hbmdvYWxpLm15IiwiY3JlYXRlX3RpbWUiOjE1NzEzOTMxNjcsImlkIjoiZTczOGQzOTZmMThlMTFlOWFlODhjY2JiZmU1ZGViZjUiLCJkZXZpY2VfaWQiOiI2OGRlMDE1NS1mMTM3LTQwNjItOTQ2NC0wMWIwYzYxMjM2NDAifQ.OzagQggkrWsQxa4m1Q0ukg-ldhh1iyuI7M5YWKMbEpc", | "user": {
"username": "mangoali.my",
"rating": 0,
"uid": "0-16248284", | random_line_split |
|
slice_test.rs | sth = [1,2,3,4,5];
let mut_slice = &mut sth[0..3];
mut_slice.swap(0, 2);
print_slice(&mut_slice);
}
pub fn reverse() {
println!("reverse");
let mut array = [1,2,3,4,5,6,7,8];
move_in_array(array);
drop(array);
let slice = &mut array[0..];
slice.reverse();
print_slice(&slice);
}
pub fn array_iteration() {
println!("1");
let array = [2,3,4,5,6,7];
let mut iter = array.iter();
while let Some(wtf) = iter.next(){
println!("{}", wtf);
}
println!("2");
let mut array = [2,3,4,5,6,7];
let mut iter = array.iter_mut();
while let Some(wtf) = iter.next() {
*wtf = *wtf + 1;
println!("{}", wtf);
}
println!("3");
let array = [2,3,4,5,6,7];
for i in &array {
println!("{}", i);
}
println!("4");
let mut array = [2,3,4,5,6,7];
for i in &mut array {
*i = *i + 1;
println!("{}", i);
}
println!("5");
let array = [2,3,4,5,6,7];
let slice = & array[..];
for i in slice {
println!("{}", i);
}
println!("6");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice {
*i = *i + 1;
println!("{}", i);
}
println!("7");
let array = [2,3,4,5,6,7];
let slice = &array[..];
for i in slice.iter() {
println!("{}", i);
}
println!("8");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice.iter_mut() {
*i = *i + 1;
println!("{}", i);
}
println!("9");
let array = [2,3,4,5,6,7];
let slice = & array[..];
let mut iter = slice.iter();
while let Some(i) = iter.next() {
println!("{}", i);
}
println!("10");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
while let Some(i) = iter_mut.next() {
*i = *i + 1;
println!("{}", i);
}
}
pub fn slice_iterator_lifetime() | // without errors. Here, the Rust borrow checker allows i1 and i2
// to exist simultaneously. Hence, it is important for the API designer
// to ensure that i1 and i2 do not refer to the same content in the original
// slice
println!("{}", i1);
println!("{}", i2);
// if i borrow from the generated iter_mut temporary variable,
// the following code would not compile
let i = slice.iter_mut().next().unwrap();
*i = *i + 1;
}
pub fn window() {
// Why we can't implement windows_mut?
// if we implement windows_mut, we can get two
// mutable slices that share a portion of the elements
// in the original slice.
let slice = &[1,2,3,4,5][..];
let wins = slice.windows(3);
for win in wins {
println!("start of a window");
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
// assert!(wins.next() == None);
let slice = &[1,2,3,4,5][..];
let mut wins = slice.windows(3);
for win in &mut wins {
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
assert!(wins.next() == None);
}
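// Sketch of why a windows_mut() cannot exist (hypothetical code, does not compile):
// overlapping windows would hand out aliasing mutable references.
//
// let mut a = [1, 2, 3, 4, 5];
// let w1 = &mut a[0..3];
// let w2 = &mut a[1..4]; // error[E0499]: cannot borrow `a` as mutable more than once at a time
// w1[1] += w2[0];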
pub fn chunks() {
let slice = &[1,2,3,4,5][..];
let chunks = slice.chunks(3);
for chunk in chunks {
println!("a new chunk");
for i in chunk.iter() {
println!("{}", i);
}
}
}
pub fn chunks_mut() {
let slice = &mut [1,2,3,4,5][..];
let mut chunks_mut = slice.chunks_mut(3);
// chunk1 and chunk2 do not borrow from chunks_mut.
// instead, both of them can be viewed as special (I don't know how to characterize them)
// borrows from slice and extend the lifetime of the slice
// Let me try to explain why the following code works:
// Rust does not prevent you from creating mutable borrows, Rust only examines
// whether the things being borrowed has been borrowed before.
// I suspect that Rust only inspects whether the object on the right hand side
// of an assignment has been borrowed before, and it will not bother doing additional
// checking.
// In the following two lines of code, Rust only tries to inspect whether chunks_mut has
// been borrowed before. Since the lifetime of both chunk1 and chunk2 are not tied to
// chunks_mut, chunks_mut is never considered borrowed.
// chunk1 and chunk2 are actually borrowed from slice, since chunk1 is used by the third line
// how is it possible to borrow chunk2 from slice? Because Rust doesn't check it. When creating
// chunk2 variable, Rust only examines whether chunk_mut has been borrowed, it will not
// inspect whether slice has been borrowed.
// Finally, a successfully created mutable reference can be freely used anywhere.
let chunk1 = chunks_mut.next().unwrap();
let chunk2 = chunks_mut.next().unwrap();
chunk1[0] = chunk1[0] + 1;
// if we try to borrow from slice, it won't pass compilation
// let sth = slice.windows(3);
chunk2[0] = chunk2[0] + 1;
for i in chunk1.iter() {
println!("{}", i);
}
for i in chunk2.iter() {
println!("{}", i);
}
let chunk = slice.chunks_mut(3).next().unwrap();
// drop(slice);
// let chunk1 = slice.chunks_mut(3).next().unwrap();
for i in chunk {
*i += 1;
println!("{}", i);
}
// I can not do something like this, [1,2,3,4,5] will be dropped
// let slice_mut = (&mut [1,2,3,4,5][..]).chunks_mut(3).next().unwrap();
// for i in slice_mut {
// *i = *i + 1;
// println!("{}", i)
// }
// I can do something like this
let slice_mut = &mut [1,2,3,4,5][..];
// Note that chunk1 borrows from slice_mut, not from chunks
let chunk1 = slice_mut.chunks_mut(3).next().unwrap();
// Note that an attempt to create another chunk will fail, because
// the borrow checker knows that slice_mut has been borrowed before.
// reference can be transitive (I don't know if this interpretation is correct)
// let chunk2 = slice_mut.chunks_mut(3).next().unwrap();
for i in chunk1 {
*i += 1;
println!("{}", i);
}
}
pub fn split_at_mut() {
let slice = &mut [1,2,3,4,5][..];
let (fst, snd) = slice.split_at_mut(2);
// We can do neither of these, because slice has been
// borrowed and has an extended lifetime to the end of this
// function.
// let (haha, hehe) = slice.split_at_mut(2);
// slice[0] = slice[0] + 1;
fst[0] = fst[0] + 1;
snd[0 | {
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
// i1 does not borrow from iter_mut, it is treated as a
// a borrow of slice, and extends the lifetime of slice
let i1 = iter_mut.next().unwrap();
// i2 is similar as i1
let i2 = iter_mut.next().unwrap();
*i1 = *i1 + 1;
// We can not borrow slice because i1 and i2 are still in use
// let mut another_iter_mut = slice.iter_mut();
*i2 = *i2 + 1;
// due to the previous reasons, we are free to modify i1 and i2 | identifier_body |
slice_test.rs | ,7];
let slice = & array[..];
for i in slice {
println!("{}", i);
}
println!("6");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice {
*i = *i + 1;
println!("{}", i);
}
println!("7");
let array = [2,3,4,5,6,7];
let slice = &array[..];
for i in slice.iter() {
println!("{}", i);
}
println!("8");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice.iter_mut() {
*i = *i + 1;
println!("{}", i);
}
println!("9");
let array = [2,3,4,5,6,7];
let slice = & array[..];
let mut iter = slice.iter();
while let Some(i) = iter.next() {
println!("{}", i);
}
println!("10");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
while let Some(i) = iter_mut.next() {
*i = *i + 1;
println!("{}", i);
}
}
pub fn slice_iterator_lifetime() {
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
// i1 does not borrow from iter_mut; it is treated as
// a borrow of slice, and extends the lifetime of slice
let i1 = iter_mut.next().unwrap();
// i2 is similar as i1
let i2 = iter_mut.next().unwrap();
*i1 = *i1 + 1;
// We can not borrow slice because i1 and i2 are still in use
// let mut another_iter_mut = slice.iter_mut();
*i2 = *i2 + 1;
// due to the previous reasons, we are free to modify i1 and i2
// without errors. Here, the Rust borrow checker allows i1 and i2
// to exist simultaneously. Hence, it is important for the API designer
// to ensure that i1 and i2 do not refer to the same content in the original
// slice
println!("{}", i1);
println!("{}", i2);
// if i borrow from the generated iter_mut temporary variable,
// the following code would not compile
let i = slice.iter_mut().next().unwrap();
*i = *i + 1;
}
pub fn window() {
// Why we can't implement windows_mut?
// if we implement windows_mut, we can get two
// mutable slices that share a portion of the elements
// in the original slice.
let slice = &[1,2,3,4,5][..];
let wins = slice.windows(3);
for win in wins {
println!("start of a window");
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
// assert!(wins.next() == None);
let slice = &[1,2,3,4,5][..];
let mut wins = slice.windows(3);
for win in &mut wins {
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
assert!(wins.next() == None);
}
pub fn chunks() {
let slice = &[1,2,3,4,5][..];
let chunks = slice.chunks(3);
for chunk in chunks {
println!("a new chunk");
for i in chunk.iter() {
println!("{}", i);
}
}
}
pub fn chunks_mut() {
let slice = &mut [1,2,3,4,5][..];
let mut chunks_mut = slice.chunks_mut(3);
// chunk1 and chunk2 do not borrow from chunks_mut.
// instead, both of them can be viewed as special (I don't know how to characterize them)
// borrows from slice and extend the lifetime of the slice
// Let me try to explain why the following code works:
// Rust does not prevent you from creating mutable borrows, Rust only examines
// whether the things being borrowed has been borrowed before.
// I suspect that Rust only inspects whether the object on the right hand side
// of an assignment has been borrowed before, and it will not bother doing additional
// checking.
// In the following two lines of code, Rust only tries to inspect whether chunks_mut has
// been borrowed before. Since the lifetime of both chunk1 and chunk2 are not tied to
// chunks_mut, chunks_mut is never considered borrowed.
// chunk1 and chunk2 are actually borrowed from slice, since chunk1 is used by the third line
// how is it possible to borrow chunk2 from slice? Because Rust doesn't check it. When creating
// chunk2 variable, Rust only examines whether chunk_mut has been borrowed, it will not
// inspect whether slice has been borrowed.
// Finally, a successfully created mutable reference can be freely used anywhere.
let chunk1 = chunks_mut.next().unwrap();
let chunk2 = chunks_mut.next().unwrap();
chunk1[0] = chunk1[0] + 1;
// if we try to borrow from slice, it won't pass compilation
// let sth = slice.windows(3);
chunk2[0] = chunk2[0] + 1;
for i in chunk1.iter() {
println!("{}", i);
}
for i in chunk2.iter() {
println!("{}", i);
}
let chunk = slice.chunks_mut(3).next().unwrap();
// drop(slice);
// let chunk1 = slice.chunks_mut(3).next().unwrap();
for i in chunk {
*i += 1;
println!("{}", i);
}
// I can not do something like this, [1,2,3,4,5] will be dropped
// let slice_mut = (&mut [1,2,3,4,5][..]).chunks_mut(3).next().unwrap();
// for i in slice_mut {
// *i = *i + 1;
// println!("{}", i)
// }
// I can do something like this
let slice_mut = &mut [1,2,3,4,5][..];
// Note that chunk1 borrows from slice_mut, not from chunks
let chunk1 = slice_mut.chunks_mut(3).next().unwrap();
// Note that an attempt to create another chunk will fail, because
// the borrow checker knows that slice_mut has been borrowed before.
// reference can be transitive (I don't know if this interpretation is correct)
// let chunk2 = slice_mut.chunks_mut(3).next().unwrap();
for i in chunk1 {
*i += 1;
println!("{}", i);
}
}
pub fn split_at_mut() {
let slice = &mut [1,2,3,4,5][..];
let (fst, snd) = slice.split_at_mut(2);
// We can do neither of these, because slice has been
// borrowed and has an extended lifetime to the end of this
// function.
// let (haha, hehe) = slice.split_at_mut(2);
// slice[0] = slice[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
print_slice(slice);
slice[0] = slice[0] + 1;
}
pub fn split_mut() {
let slice = &mut [1,2,3,4,5][..];
let split = slice.split_mut(|i| {
*i == 3
});
for chunk in split {
println!("a new chunk");
for i in chunk {
*i += 1;
println!("{}", i);
}
}
}
pub fn sort_and_search() {
let mut vec = vec!(2,1,2,4,3,2,3,2,1,3,2,3,4,5);
println!("{:?}", &vec);
let slice = &mut vec[..];
slice.sort_unstable();
let res = slice.binary_search(&3);
match res {
Ok(i) => {
println!("found {} from index {}", slice[i], i);
},
Err(i) => {
println!("please insert 3 at index {}", i);
}
}
let res = slice.binary_search(&109);
match res {
Ok(i) => {
println!("found {} from index {}", slice[i], i);
},
Err(i) => | {
println!("please insert 109 at index {}", i);
vec.insert(i, 109);
} | conditional_block |
|
slice_test.rs | ();
while let Some(wtf) = iter.next(){
println!("{}", wtf);
}
println!("2");
let mut array = [2,3,4,5,6,7];
let mut iter = array.iter_mut();
while let Some(wtf) = iter.next() {
*wtf = *wtf + 1;
println!("{}", wtf);
}
println!("3");
let array = [2,3,4,5,6,7];
for i in &array {
println!("{}", i);
}
println!("4");
let mut array = [2,3,4,5,6,7];
for i in &mut array {
*i = *i + 1;
println!("{}", i);
}
println!("5");
let array = [2,3,4,5,6,7];
let slice = & array[..];
for i in slice {
println!("{}", i);
}
println!("6");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice {
*i = *i + 1;
println!("{}", i);
}
println!("7");
let array = [2,3,4,5,6,7];
let slice = &array[..];
for i in slice.iter() {
println!("{}", i);
}
println!("8");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice.iter_mut() {
*i = *i + 1;
println!("{}", i);
}
println!("9");
let array = [2,3,4,5,6,7];
let slice = & array[..];
let mut iter = slice.iter();
while let Some(i) = iter.next() {
println!("{}", i);
}
println!("10");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
while let Some(i) = iter_mut.next() {
*i = *i + 1;
println!("{}", i);
}
}
pub fn slice_iterator_lifetime() {
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
// i1 does not borrow from iter_mut; it is treated as
// a borrow of slice, and extends the lifetime of slice
let i1 = iter_mut.next().unwrap();
// i2 is similar as i1
let i2 = iter_mut.next().unwrap();
*i1 = *i1 + 1;
// We can not borrow slice because i1 and i2 are still in use
// let mut another_iter_mut = slice.iter_mut();
*i2 = *i2 + 1;
// due to the previous reasons, we are free to modify i1 and i2
// without errors. Here, the Rust borrow checker allows i1 and i2
// to exist simultaneously. Hence, it is important for the API designer
// to ensure that i1 and i2 do not refer to the same content in the original
// slice
println!("{}", i1);
println!("{}", i2);
// if i borrow from the generated iter_mut temporary variable,
// the following code would not compile
let i = slice.iter_mut().next().unwrap();
*i = *i + 1;
}
pub fn window() {
// Why we can't implement windows_mut?
// if we implement windows_mut, we can get two
// mutable slices that share a portion of the elements
// in the original slice.
let slice = &[1,2,3,4,5][..];
let wins = slice.windows(3);
for win in wins {
println!("start of a window");
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
// assert!(wins.next() == None);
let slice = &[1,2,3,4,5][..];
let mut wins = slice.windows(3);
for win in &mut wins {
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
assert!(wins.next() == None);
}
pub fn chunks() {
let slice = &[1,2,3,4,5][..];
let chunks = slice.chunks(3);
for chunk in chunks {
println!("a new chunk");
for i in chunk.iter() {
println!("{}", i);
}
}
}
pub fn chunks_mut() {
let slice = &mut [1,2,3,4,5][..];
let mut chunks_mut = slice.chunks_mut(3);
// chunk1 and chunk2 do not borrow from chunks_mut.
// instead, both of them can be viewed as special (I don't know how to characterize them)
// borrows from slice and extend the lifetime of the slice
// Let me try to explain why the following code works:
// Rust does not prevent you from creating mutable borrows, Rust only examines
// whether the things being borrowed has been borrowed before.
// I suspect that Rust only inspects whether the object on the right hand side
// of an assignment has been borrowed before, and it will not bother doing additional
// checking.
// In the following two lines of code, Rust only tries to inspect whether chunks_mut has
// been borrowed before. Since the lifetime of both chunk1 and chunk2 are not tied to
// chunks_mut, chunks_mut is never considered borrowed.
// chunk1 and chunk2 are actually borrowed from slice, since chunk1 is used by the third line
// how is it possible to borrow chunk2 from slice? Because Rust doesn't check it. When creating
// chunk2 variable, Rust only examines whether chunk_mut has been borrowed, it will not
// inspect whether slice has been borrowed.
// Finally, a successfully created mutable reference can be freely used anywhere.
let chunk1 = chunks_mut.next().unwrap();
let chunk2 = chunks_mut.next().unwrap();
chunk1[0] = chunk1[0] + 1;
// if we try to borrow from slice, it won't pass compilation
// let sth = slice.windows(3);
chunk2[0] = chunk2[0] + 1;
for i in chunk1.iter() {
println!("{}", i);
}
for i in chunk2.iter() {
println!("{}", i);
}
let chunk = slice.chunks_mut(3).next().unwrap();
// drop(slice);
// let chunk1 = slice.chunks_mut(3).next().unwrap();
for i in chunk {
*i += 1;
println!("{}", i);
}
// I can not do something like this, [1,2,3,4,5] will be dropped
// let slice_mut = (&mut [1,2,3,4,5][..]).chunks_mut(3).next().unwrap();
// for i in slice_mut {
// *i = *i + 1;
// println!("{}", i)
// }
// I can do something like this
let slice_mut = &mut [1,2,3,4,5][..];
// Note that chunk1 borrows from slice_mut, not from chunks
let chunk1 = slice_mut.chunks_mut(3).next().unwrap();
// Note that an attempt to create another chunk will fail, because
// the borrow checker knows that slice_mut has been borrowed before.
// reference can be transitive (I don't know if this interpretation is correct)
// let chunk2 = slice_mut.chunks_mut(3).next().unwrap();
for i in chunk1 {
*i += 1;
println!("{}", i);
}
}
pub fn split_at_mut() {
let slice = &mut [1,2,3,4,5][..];
let (fst, snd) = slice.split_at_mut(2);
// We can do neither of these, because slice has been
// borrowed and has an extended lifetime to the end of this
// function.
// let (haha, hehe) = slice.split_at_mut(2);
// slice[0] = slice[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
print_slice(slice);
slice[0] = slice[0] + 1;
}
pub fn split_mut() {
let slice = &mut [1,2,3,4,5][..];
let split = slice.split_mut(|i| {
*i == 3
});
for chunk in split {
println!("a new chunk");
for i in chunk {
*i += 1;
println!("{}", i);
}
}
}
pub fn | sort_and_search | identifier_name |
|
slice_test.rs | }
pub fn slice_size_len() {
println!("{}", mem::size_of::<&[i32]>());
// println!("{}", mem::size_of::<[i32]>());
let sth = [1,2,3];
let slice = &sth[0..3];
println!("slice.len");
println!("{}",slice.len());
assert!(slice.first() == Some(&1));
}
pub fn slice_split_first() {
let slice = &mut [1,2,3,4,5][..];
slice.split_first().map(|(fst_elem, rest_slice)| {
println!("{}", fst_elem);
println!("rest");
print_slice(rest_slice);
});
}
pub fn empty_slice() {
let empty_slice : &[i32] = &[];
println!("{}", empty_slice.len());
}
pub fn bracket_operator() {
let array_boxed = [Box::new(1), Box::new(2), Box::new(3)];
let slice = &array_boxed[0..3];
let v1 = &slice[2];
// why deref doesn't work here
assert!(&**v1 == &3);
}
pub fn swap() {
println!("swap");
let mut sth = [1,2,3,4,5];
let mut_slice = &mut sth[0..3];
mut_slice.swap(0, 2);
print_slice(&mut_slice);
}
pub fn reverse() {
println!("reverse");
let mut array = [1,2,3,4,5,6,7,8];
move_in_array(array);
drop(array);
let slice = &mut array[0..];
slice.reverse();
print_slice(&slice);
}
pub fn array_iteration() {
println!("1");
let array = [2,3,4,5,6,7];
let mut iter = array.iter();
while let Some(wtf) = iter.next(){
println!("{}", wtf);
}
println!("2");
let mut array = [2,3,4,5,6,7];
let mut iter = array.iter_mut();
while let Some(wtf) = iter.next() {
*wtf = *wtf + 1;
println!("{}", wtf);
}
println!("3");
let array = [2,3,4,5,6,7];
for i in &array {
println!("{}", i);
}
println!("4");
let mut array = [2,3,4,5,6,7];
for i in &mut array {
*i = *i + 1;
println!("{}", i);
}
println!("5");
let array = [2,3,4,5,6,7];
let slice = & array[..];
for i in slice {
println!("{}", i);
}
println!("6");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice {
*i = *i + 1;
println!("{}", i);
}
println!("7");
let array = [2,3,4,5,6,7];
let slice = &array[..];
for i in slice.iter() {
println!("{}", i);
}
println!("8");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice.iter_mut() {
*i = *i + 1;
println!("{}", i);
}
println!("9");
let array = [2,3,4,5,6,7];
let slice = & array[..];
let mut iter = slice.iter();
while let Some(i) = iter.next() {
println!("{}", i);
}
println!("10");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
while let Some(i) = iter_mut.next() {
*i = *i + 1;
println!("{}", i);
}
}
pub fn slice_iterator_lifetime() {
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
// i1 does not borrow from iter_mut; it is treated as
// a borrow of slice, and extends the lifetime of slice
let i1 = iter_mut.next().unwrap();
// i2 is similar as i1
let i2 = iter_mut.next().unwrap();
*i1 = *i1 + 1;
// We can not borrow slice because i1 and i2 are still in use
// let mut another_iter_mut = slice.iter_mut();
*i2 = *i2 + 1;
// due to the previous reasons, we are free to modify i1 and i2
// without errors. Here, the Rust borrow checker allows i1 and i2
// to exist simultaneously. Hence, it is important for the API designer
// to ensure that i1 and i2 do not refer to the same content in the original
// slice
println!("{}", i1);
println!("{}", i2);
// if i borrow from the generated iter_mut temporary variable,
// the following code would not compile
let i = slice.iter_mut().next().unwrap();
*i = *i + 1;
}
pub fn window() {
// Why we can't implement windows_mut?
// if we implement windows_mut, we can get two
// mutable slices that share a portion of the elements
// in the original slice.
let slice = &[1,2,3,4,5][..];
let wins = slice.windows(3);
for win in wins {
println!("start of a window");
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
// assert!(wins.next() == None);
let slice = &[1,2,3,4,5][..];
let mut wins = slice.windows(3);
for win in &mut wins {
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
assert!(wins.next() == None);
}
pub fn chunks() {
let slice = &[1,2,3,4,5][..];
let chunks = slice.chunks(3);
for chunk in chunks {
println!("a new chunk");
for i in chunk.iter() {
println!("{}", i);
}
}
}
pub fn chunks_mut() {
let slice = &mut [1,2,3,4,5][..];
let mut chunks_mut = slice.chunks_mut(3);
// chunk1 and chunk2 do not borrow from chunks_mut.
// instead, both of them can be viewed as special (I don't know how to characterize them)
// borrows from slice and extend the lifetime of the slice
// Let me try to explain why the following code works:
// Rust does not prevent you from creating mutable borrows, Rust only examines
// whether the things being borrowed has been borrowed before.
// I suspect that Rust only inspects whether the object on the right hand side
// of an assignment has been borrowed before, and it will not bother doing additional
// checking.
// In the following two lines of code, Rust only tries to inspect whether chunks_mut has
// been borrowed before. Since the lifetime of both chunk1 and chunk2 are not tied to
// chunks_mut, chunks_mut is never considered borrowed.
// chunk1 and chunk2 are actually borrowed from slice, since chunk1 is used by the third line
// how is it possible to borrow chunk2 from slice? Because Rust doesn't check it. When creating
// chunk2 variable, Rust only examines whether chunk_mut has been borrowed, it will not
// inspect whether slice has been borrowed.
// Finally, a successfully created mutable reference can be freely used anywhere.
let chunk1 = chunks_mut.next().unwrap();
let chunk2 = chunks_mut.next().unwrap();
chunk1[0] = chunk1[0] + 1;
// if we try to borrow from slice, it won't pass compilation
// let sth = slice.windows(3);
chunk2[0] = chunk2[0] + 1;
for i in chunk1.iter() {
println!("{}", i);
}
for i in chunk2.iter() {
println!("{}", i);
}
let chunk = slice.chunks_mut(3).next().unwrap();
// drop(slice);
// let chunk1 = slice.chunks_mut(3).next().unwrap();
for i in chunk {
*i += 1;
println!("{}", i);
}
// I can not do something like this, [1,2,3,4,5] will be dropped
// let slice_mut = (&mut [1,2,3,4,5][..]).chunks_mut(3).next().unwrap();
// for i in slice_mut {
// *i = *i + 1;
// println!("{}", i)
// | *i = *i + 1;
println!("{} ", i);
} | random_line_split |
|
tree_search.py | in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import numpy as np
from nums.core.optimizer.comp_graph import GraphArray, TreeNode, BinaryOp, ReductionOp, Leaf, UnaryOp
random_state = np.random.RandomState(1337)
class ProgramState(object):
def __init__(self, arr: GraphArray,
max_reduction_pairs=None,
force_final_action=True,
unique_reduction_pairs=False):
self.arr: GraphArray = arr
self.force_final_action = force_final_action
self.get_action_kwargs = {"max_reduction_pairs": max_reduction_pairs,
"unique_reduction_pairs": unique_reduction_pairs}
self.tnode_map = {}
self.init_frontier()
def num_nodes(self):
r = 0
for grid_entry in self.arr.grid.get_entry_iterator():
root: TreeNode = self.arr.graphs[grid_entry]
r += root.num_nodes()
return r
def init_frontier(self):
for grid_entry in self.arr.grid.get_entry_iterator():
self.add_frontier_tree(self.arr.graphs[grid_entry])
def add_frontier_tree(self, start_node: TreeNode):
for tnode in start_node.get_frontier():
self.add_frontier_node(tnode)
def get_bc_action(self, tnode: TreeNode):
# This is hacky, but no good way to do it w/ current abstractions.
if isinstance(tnode, BinaryOp):
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id})]
elif isinstance(tnode, ReductionOp):
leaf_ids = tuple(tnode.leafs_dict.keys())[:2]
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id,
"leaf_ids": leaf_ids})]
elif isinstance(tnode, UnaryOp):
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id})]
else:
raise Exception()
return actions
def add_frontier_node(self, tnode: TreeNode):
# This is a frontier node.
actions = None
if self.force_final_action and tnode.parent is None:
if isinstance(tnode, (BinaryOp, UnaryOp)) or (isinstance(tnode, ReductionOp)
and len(tnode.children_dict) == 2):
# This is a root frontier binary op or reduction op with 2 children.
# The next action is the last action,
# so intercept action to force computation on root node entry.
actions = self.get_bc_action(tnode)
if actions is None:
actions = tnode.get_actions(**self.get_action_kwargs)
self.tnode_map[tnode.tree_node_id] = (tnode, actions)
def copy(self):
return ProgramState(self.arr.copy())
def commit_action(self, action):
| # That's it. This program state is now updated.
return self.objective(self.arr.cluster_state.resources)
def simulate_action(self, action):
tnode_id, kwargs = action
entry = self.tnode_map[tnode_id]
node: TreeNode = entry[0]
new_resources: np.ndarray = node.simulate_on(**kwargs)
return self.objective(new_resources)
def objective(self, resources):
# Our simple objective.
return np.sum(resources[1:])
def get_tnode_grid_entry(self, tnode: TreeNode):
if tnode.parent is None:
root: TreeNode = tnode
else:
root: TreeNode = tnode.get_root()
tree_root_grid_entry = None
for grid_entry in self.arr.grid.get_entry_iterator():
tree_node: TreeNode = self.arr.graphs[grid_entry]
if tree_node is root:
tree_root_grid_entry = grid_entry
break
if tree_root_grid_entry is None:
raise Exception("Bad tree.")
return tree_root_grid_entry
def update_root(self, old_root, new_root):
tree_root_grid_entry = self.get_tnode_grid_entry(old_root)
self.arr.graphs[tree_root_grid_entry] = new_root
def get_all_actions(self):
# This is not deterministic due to hashing of children for reduction nodes.
actions = []
for tnode_id in self.tnode_map:
actions += self.tnode_map[tnode_id][1]
return actions
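# Usage sketch (hypothetical, not part of the class): a greedy scheduler scores every
# candidate action with simulate_action and commits the cheapest one.
#
#   actions = state.get_all_actions()
#   best_action = min(actions, key=state.simulate_action)
#   cost = state.commit_action(best_action)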
class TreeSearch(object):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
if isinstance(seed, np.random.RandomState):
self.rs = seed
else:
assert isinstance(seed, (int, np.int))
self.rs = np.random.RandomState(seed)
self.max_samples_per_step = max_samples_per_step
self.max_reduction_pairs = max_reduction_pairs
self.force_final_action = force_final_action
def step(self, state: ProgramState):
raise NotImplementedError()
def solve(self, arr: GraphArray):
state: ProgramState = ProgramState(arr,
max_reduction_pairs=self.max_reduction_pairs,
force_final_action=self.force_final_action)
num_steps = 0
while True:
num_steps += 1
state, cost, is_done = self.step(state)
if is_done:
break
return state.arr
class BlockCyclicTS(TreeSearch):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
super().__init__(seed,
max_samples_per_step,
max_reduction_pairs,
force_final_action)
def step(self, state: ProgramState):
if len(state.tnode_map) == 0:
# We're done.
return state, state.objective(state.arr.cluster_state.resources), True
action = None
for tnode_id in state.tnode_map:
action = state.get_bc_action(state.tnode_map[tnode_id][0])[0]
break
curr_cost = state.commit_action(action)
return state, curr_cost, False
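# Usage sketch (hypothetical; `graph_array` stands in for any GraphArray): both schedulers
# share the TreeSearch.solve entry point.
#
#   opt_ga = BlockCyclicTS(seed=1337).solve(graph_array)  # deterministic block-cyclic placement
#   opt_ga = RandomTS(seed=1337, max_samples_per_step=8).solve(graph_array)  # samples candidate actions each step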
class RandomTS(TreeSearch):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
super().__init__(seed,
max_samples_per_step,
max_reduction_pairs,
force_final_action)
def sample_actions(self, state: ProgramState) -> list:
if self.max_samples_per_step is None:
return state.get_all_actions()
# Subsample a set of frontier nodes to try next.
tnode_ids = list(state.tnode_map.keys())
num_tnodes = len(tnode_ids)
if num_tnodes <= self.max_samples_per_step:
tnode_id_sample = tnode_ids
else:
idx_set = set()
tnode_id_sample = []
while len(idx_set) < self.max_samples_per_step:
i = self.rs.randint(0, num_tnodes)
if i not in idx_set:
idx_set.add(i)
tnode_id_sample.append(tnode_ids[i])
actions = []
for tnode_id in tnode_id_sample:
actions += state.tnode_map[tnode_id][1]
return actions
def step(self, state: ProgramState):
# Sampling slows things down because for some reason,
# the lowest cost computations are the sums, so
# an algorithm that finds local optima keeps the number of leafs for reductions
# small by computing them whenever they occur.
actions = self.sample_actions(state | tnode_id, kwargs = action
entry = self.tnode_map[tnode_id]
old_node: TreeNode = entry[0]
new_node: TreeNode = old_node.execute_on(**kwargs)
# The frontier needs to be updated, so remove the current node from frontier.
del self.tnode_map[tnode_id]
if old_node.parent is None and old_node is not new_node:
# We operated on a root node, so update the array.
self.update_root(old_node, new_node)
if isinstance(new_node, Leaf):
# If it's a leaf node, its parent may now be a frontier node.
new_node_parent: TreeNode = new_node.parent
if new_node_parent is not None and new_node_parent.is_frontier():
self.add_frontier_node(new_node_parent)
else:
# There's still work that needs to be done to compute this node.
# Add the returned node to the frontier.
# Either a BinaryOp or ReductionOp.
if new_node.is_frontier():
self.add_frontier_node(new_node) | identifier_body |
tree_search.py | compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import numpy as np
from nums.core.optimizer.comp_graph import GraphArray, TreeNode, BinaryOp, ReductionOp, Leaf, UnaryOp
random_state = np.random.RandomState(1337)
class ProgramState(object):
def __init__(self, arr: GraphArray,
max_reduction_pairs=None,
force_final_action=True,
unique_reduction_pairs=False):
self.arr: GraphArray = arr
self.force_final_action = force_final_action
self.get_action_kwargs = {"max_reduction_pairs": max_reduction_pairs,
"unique_reduction_pairs": unique_reduction_pairs}
self.tnode_map = {}
self.init_frontier()
def num_nodes(self):
r = 0
for grid_entry in self.arr.grid.get_entry_iterator():
root: TreeNode = self.arr.graphs[grid_entry]
r += root.num_nodes()
return r
def | (self):
for grid_entry in self.arr.grid.get_entry_iterator():
self.add_frontier_tree(self.arr.graphs[grid_entry])
def add_frontier_tree(self, start_node: TreeNode):
for tnode in start_node.get_frontier():
self.add_frontier_node(tnode)
def get_bc_action(self, tnode: TreeNode):
# This is hacky, but no good way to do it w/ current abstractions.
if isinstance(tnode, BinaryOp):
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id})]
elif isinstance(tnode, ReductionOp):
leaf_ids = tuple(tnode.leafs_dict.keys())[:2]
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id,
"leaf_ids": leaf_ids})]
elif isinstance(tnode, UnaryOp):
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id})]
else:
raise Exception()
return actions
def add_frontier_node(self, tnode: TreeNode):
# This is a frontier node.
actions = None
if self.force_final_action and tnode.parent is None:
if isinstance(tnode, (BinaryOp, UnaryOp)) or (isinstance(tnode, ReductionOp)
and len(tnode.children_dict) == 2):
# This is a root frontier binary op or reduction op with 2 children.
# The next action is the last action,
# so intercept action to force computation on root node entry.
actions = self.get_bc_action(tnode)
if actions is None:
actions = tnode.get_actions(**self.get_action_kwargs)
self.tnode_map[tnode.tree_node_id] = (tnode, actions)
def copy(self):
return ProgramState(self.arr.copy())
def commit_action(self, action):
tnode_id, kwargs = action
entry = self.tnode_map[tnode_id]
old_node: TreeNode = entry[0]
new_node: TreeNode = old_node.execute_on(**kwargs)
# The frontier needs to be updated, so remove the current node from frontier.
del self.tnode_map[tnode_id]
if old_node.parent is None and old_node is not new_node:
# We operated on a root node, so update the array.
self.update_root(old_node, new_node)
if isinstance(new_node, Leaf):
# If it's a leaf node, its parent may now be a frontier node.
new_node_parent: TreeNode = new_node.parent
if new_node_parent is not None and new_node_parent.is_frontier():
self.add_frontier_node(new_node_parent)
else:
# There's still work that needs to be done to compute this node.
# Add the returned node to the frontier.
# Either a BinaryOp or ReductionOp.
if new_node.is_frontier():
self.add_frontier_node(new_node)
# That's it. This program state is now updated.
return self.objective(self.arr.cluster_state.resources)
def simulate_action(self, action):
tnode_id, kwargs = action
entry = self.tnode_map[tnode_id]
node: TreeNode = entry[0]
new_resources: np.ndarray = node.simulate_on(**kwargs)
return self.objective(new_resources)
def objective(self, resources):
# Our simple objective.
return np.sum(resources[1:])
def get_tnode_grid_entry(self, tnode: TreeNode):
if tnode.parent is None:
root: TreeNode = tnode
else:
root: TreeNode = tnode.get_root()
tree_root_grid_entry = None
for grid_entry in self.arr.grid.get_entry_iterator():
tree_node: TreeNode = self.arr.graphs[grid_entry]
if tree_node is root:
tree_root_grid_entry = grid_entry
break
if tree_root_grid_entry is None:
raise Exception("Bad tree.")
return tree_root_grid_entry
def update_root(self, old_root, new_root):
tree_root_grid_entry = self.get_tnode_grid_entry(old_root)
self.arr.graphs[tree_root_grid_entry] = new_root
def get_all_actions(self):
# This is not deterministic due to hashing of children for reduction nodes.
actions = []
for tnode_id in self.tnode_map:
actions += self.tnode_map[tnode_id][1]
return actions
class TreeSearch(object):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
if isinstance(seed, np.random.RandomState):
self.rs = seed
else:
assert isinstance(seed, (int, np.int))
self.rs = np.random.RandomState(seed)
self.max_samples_per_step = max_samples_per_step
self.max_reduction_pairs = max_reduction_pairs
self.force_final_action = force_final_action
def step(self, state: ProgramState):
raise NotImplementedError()
def solve(self, arr: GraphArray):
state: ProgramState = ProgramState(arr,
max_reduction_pairs=self.max_reduction_pairs,
force_final_action=self.force_final_action)
num_steps = 0
while True:
num_steps += 1
state, cost, is_done = self.step(state)
if is_done:
break
return state.arr
class BlockCyclicTS(TreeSearch):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
super().__init__(seed,
max_samples_per_step,
max_reduction_pairs,
force_final_action)
def step(self, state: ProgramState):
if len(state.tnode_map) == 0:
# We're done.
return state, state.objective(state.arr.cluster_state.resources), True
action = None
for tnode_id in state.tnode_map:
action = state.get_bc_action(state.tnode_map[tnode_id][0])[0]
break
curr_cost = state.commit_action(action)
return state, curr_cost, False
class RandomTS(TreeSearch):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
super().__init__(seed,
max_samples_per_step,
max_reduction_pairs,
force_final_action)
def sample_actions(self, state: ProgramState) -> list:
if self.max_samples_per_step is None:
return state.get_all_actions()
# Subsample a set of frontier nodes to try next.
tnode_ids = list(state.tnode_map.keys())
num_tnodes = len(tnode_ids)
if num_tnodes <= self.max_samples_per_step:
tnode_id_sample = tnode_ids
else:
idx_set = set()
tnode_id_sample = []
while len(idx_set) < self.max_samples_per_step:
i = self.rs.randint(0, num_tnodes)
if i not in idx_set:
idx_set.add(i)
tnode_id_sample.append(tnode_ids[i])
actions = []
for tnode_id in tnode_id_sample:
actions += state.tnode_map[tnode_id][1]
return actions
def step(self, state: ProgramState):
# Sampling slows things down because for some reason,
# the lowest cost computations are the sums, so
# an algorithm that finds local optima keeps the number of leafs for reductions
# small by computing them whenever they occur.
actions = self.sample_actions(state | init_frontier | identifier_name |
tree_search.py | compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import numpy as np
from nums.core.optimizer.comp_graph import GraphArray, TreeNode, BinaryOp, ReductionOp, Leaf, UnaryOp
random_state = np.random.RandomState(1337)
class ProgramState(object):
def __init__(self, arr: GraphArray,
max_reduction_pairs=None,
force_final_action=True,
unique_reduction_pairs=False):
self.arr: GraphArray = arr
self.force_final_action = force_final_action
self.get_action_kwargs = {"max_reduction_pairs": max_reduction_pairs,
"unique_reduction_pairs": unique_reduction_pairs}
self.tnode_map = {}
self.init_frontier()
def num_nodes(self):
r = 0
for grid_entry in self.arr.grid.get_entry_iterator():
root: TreeNode = self.arr.graphs[grid_entry]
r += root.num_nodes()
return r
def init_frontier(self):
for grid_entry in self.arr.grid.get_entry_iterator():
self.add_frontier_tree(self.arr.graphs[grid_entry])
def add_frontier_tree(self, start_node: TreeNode):
for tnode in start_node.get_frontier():
self.add_frontier_node(tnode)
def get_bc_action(self, tnode: TreeNode):
# This is hacky, but no good way to do it w/ current abstractions.
if isinstance(tnode, BinaryOp):
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id})]
elif isinstance(tnode, ReductionOp):
leaf_ids = tuple(tnode.leafs_dict.keys())[:2]
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id,
"leaf_ids": leaf_ids})]
elif isinstance(tnode, UnaryOp):
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id})]
else:
raise Exception()
return actions
def add_frontier_node(self, tnode: TreeNode):
# This is a frontier node.
actions = None
if self.force_final_action and tnode.parent is None:
if isinstance(tnode, (BinaryOp, UnaryOp)) or (isinstance(tnode, ReductionOp)
and len(tnode.children_dict) == 2):
# This is a root frontier binary op or reduction op with 2 children.
# The next action is the last action,
# so intercept action to force computation on root node entry.
|
if actions is None:
actions = tnode.get_actions(**self.get_action_kwargs)
self.tnode_map[tnode.tree_node_id] = (tnode, actions)
def copy(self):
return ProgramState(self.arr.copy())
def commit_action(self, action):
tnode_id, kwargs = action
entry = self.tnode_map[tnode_id]
old_node: TreeNode = entry[0]
new_node: TreeNode = old_node.execute_on(**kwargs)
# The frontier needs to be updated, so remove the current node from frontier.
del self.tnode_map[tnode_id]
if old_node.parent is None and old_node is not new_node:
# We operated on a root node, so update the array.
self.update_root(old_node, new_node)
if isinstance(new_node, Leaf):
# If it's a leaf node, its parent may now be a frontier node.
new_node_parent: TreeNode = new_node.parent
if new_node_parent is not None and new_node_parent.is_frontier():
self.add_frontier_node(new_node_parent)
else:
# There's still work that needs to be done to compute this node.
# Add the returned node to the frontier.
# Either a BinaryOp or ReductionOp.
if new_node.is_frontier():
self.add_frontier_node(new_node)
# That's it. This program state is now updated.
return self.objective(self.arr.cluster_state.resources)
def simulate_action(self, action):
tnode_id, kwargs = action
entry = self.tnode_map[tnode_id]
node: TreeNode = entry[0]
new_resources: np.ndarray = node.simulate_on(**kwargs)
return self.objective(new_resources)
def objective(self, resources):
# Our simple objective.
return np.sum(resources[1:])
def get_tnode_grid_entry(self, tnode: TreeNode):
if tnode.parent is None:
root: TreeNode = tnode
else:
root: TreeNode = tnode.get_root()
tree_root_grid_entry = None
for grid_entry in self.arr.grid.get_entry_iterator():
tree_node: TreeNode = self.arr.graphs[grid_entry]
if tree_node is root:
tree_root_grid_entry = grid_entry
break
if tree_root_grid_entry is None:
raise Exception("Bad tree.")
return tree_root_grid_entry
def update_root(self, old_root, new_root):
tree_root_grid_entry = self.get_tnode_grid_entry(old_root)
self.arr.graphs[tree_root_grid_entry] = new_root
def get_all_actions(self):
# This is not deterministic due to hashing of children for reduction nodes.
actions = []
for tnode_id in self.tnode_map:
actions += self.tnode_map[tnode_id][1]
return actions
class TreeSearch(object):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
if isinstance(seed, np.random.RandomState):
self.rs = seed
else:
assert isinstance(seed, (int, np.int))
self.rs = np.random.RandomState(seed)
self.max_samples_per_step = max_samples_per_step
self.max_reduction_pairs = max_reduction_pairs
self.force_final_action = force_final_action
def step(self, state: ProgramState):
raise NotImplementedError()
def solve(self, arr: GraphArray):
state: ProgramState = ProgramState(arr,
max_reduction_pairs=self.max_reduction_pairs,
force_final_action=self.force_final_action)
num_steps = 0
while True:
num_steps += 1
state, cost, is_done = self.step(state)
if is_done:
break
return state.arr
class BlockCyclicTS(TreeSearch):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
super().__init__(seed,
max_samples_per_step,
max_reduction_pairs,
force_final_action)
def step(self, state: ProgramState):
if len(state.tnode_map) == 0:
# We're done.
return state, state.objective(state.arr.cluster_state.resources), True
action = None
for tnode_id in state.tnode_map:
action = state.get_bc_action(state.tnode_map[tnode_id][0])[0]
break
curr_cost = state.commit_action(action)
return state, curr_cost, False
class RandomTS(TreeSearch):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
super().__init__(seed,
max_samples_per_step,
max_reduction_pairs,
force_final_action)
def sample_actions(self, state: ProgramState) -> list:
if self.max_samples_per_step is None:
return state.get_all_actions()
# Subsample a set of frontier nodes to try next.
tnode_ids = list(state.tnode_map.keys())
num_tnodes = len(tnode_ids)
if num_tnodes <= self.max_samples_per_step:
tnode_id_sample = tnode_ids
else:
idx_set = set()
tnode_id_sample = []
while len(idx_set) < self.max_samples_per_step:
i = self.rs.randint(0, num_tnodes)
if i not in idx_set:
idx_set.add(i)
tnode_id_sample.append(tnode_ids[i])
actions = []
for tnode_id in tnode_id_sample:
actions += state.tnode_map[tnode_id][1]
return actions
def step(self, state: ProgramState):
# Sampling slows things down because for some reason,
# the lowest cost computations are the sums, so
# an algorithm that finds local optima keeps the number of leaves for reductions
# small by computing them whenever they occur.
actions = self.sample_actions(state | actions = self.get_bc_action(tnode) | conditional_block |
tree_search.py | #
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import numpy as np
from nums.core.optimizer.comp_graph import GraphArray, TreeNode, BinaryOp, ReductionOp, Leaf, UnaryOp
random_state = np.random.RandomState(1337)
class ProgramState(object):
def __init__(self, arr: GraphArray,
max_reduction_pairs=None,
force_final_action=True,
unique_reduction_pairs=False):
self.arr: GraphArray = arr
self.force_final_action = force_final_action
self.get_action_kwargs = {"max_reduction_pairs": max_reduction_pairs,
"unique_reduction_pairs": unique_reduction_pairs}
self.tnode_map = {}
self.init_frontier()
def num_nodes(self):
r = 0
for grid_entry in self.arr.grid.get_entry_iterator():
root: TreeNode = self.arr.graphs[grid_entry]
r += root.num_nodes()
return r
def init_frontier(self):
for grid_entry in self.arr.grid.get_entry_iterator():
self.add_frontier_tree(self.arr.graphs[grid_entry])
def add_frontier_tree(self, start_node: TreeNode):
for tnode in start_node.get_frontier():
self.add_frontier_node(tnode)
def get_bc_action(self, tnode: TreeNode):
# This is hacky, but no good way to do it w/ current abstractions.
if isinstance(tnode, BinaryOp):
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id})]
elif isinstance(tnode, ReductionOp):
leaf_ids = tuple(tnode.leafs_dict.keys())[:2]
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id,
"leaf_ids": leaf_ids})]
elif isinstance(tnode, UnaryOp):
grid_entry = self.get_tnode_grid_entry(tnode)
node_id = self.arr.cluster_state.get_cluster_entry(grid_entry, self.arr.grid.grid_shape)
actions = [(tnode.tree_node_id, {"node_id": node_id})]
else:
raise Exception()
return actions
def add_frontier_node(self, tnode: TreeNode):
# This is a frontier node.
actions = None
if self.force_final_action and tnode.parent is None:
if isinstance(tnode, (BinaryOp, UnaryOp)) or (isinstance(tnode, ReductionOp)
and len(tnode.children_dict) == 2):
# This is a root frontier binary op or reduction op with 2 children.
# The next action is the last action,
# so intercept action to force computation on root node entry.
actions = self.get_bc_action(tnode)
if actions is None:
actions = tnode.get_actions(**self.get_action_kwargs)
self.tnode_map[tnode.tree_node_id] = (tnode, actions)
def copy(self):
return ProgramState(self.arr.copy())
def commit_action(self, action):
tnode_id, kwargs = action
entry = self.tnode_map[tnode_id]
old_node: TreeNode = entry[0]
new_node: TreeNode = old_node.execute_on(**kwargs)
# The frontier needs to be updated, so remove the current node from frontier.
del self.tnode_map[tnode_id]
if old_node.parent is None and old_node is not new_node:
# We operated on a root node, so update the array.
self.update_root(old_node, new_node)
if isinstance(new_node, Leaf):
# If it's a leaf node, its parent may now be a frontier node.
new_node_parent: TreeNode = new_node.parent
if new_node_parent is not None and new_node_parent.is_frontier():
self.add_frontier_node(new_node_parent)
else:
# There's still work that needs to be done to compute this node.
# Add the returned node to the frontier.
# Either a BinaryOp or ReductionOp.
if new_node.is_frontier():
self.add_frontier_node(new_node)
# That's it. This program state is now updated.
return self.objective(self.arr.cluster_state.resources)
def simulate_action(self, action):
tnode_id, kwargs = action
entry = self.tnode_map[tnode_id]
node: TreeNode = entry[0]
new_resources: np.ndarray = node.simulate_on(**kwargs)
return self.objective(new_resources)
def objective(self, resources):
# Our simple objective.
return np.sum(resources[1:])
def get_tnode_grid_entry(self, tnode: TreeNode):
if tnode.parent is None:
root: TreeNode = tnode
else:
root: TreeNode = tnode.get_root()
tree_root_grid_entry = None
for grid_entry in self.arr.grid.get_entry_iterator():
tree_node: TreeNode = self.arr.graphs[grid_entry]
if tree_node is root:
tree_root_grid_entry = grid_entry
break
if tree_root_grid_entry is None:
raise Exception("Bad tree.")
return tree_root_grid_entry
def update_root(self, old_root, new_root):
tree_root_grid_entry = self.get_tnode_grid_entry(old_root)
self.arr.graphs[tree_root_grid_entry] = new_root
def get_all_actions(self):
# This is not deterministic due to hashing of children for reduction nodes.
actions = []
for tnode_id in self.tnode_map:
actions += self.tnode_map[tnode_id][1]
return actions
class TreeSearch(object):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
if isinstance(seed, np.random.RandomState):
self.rs = seed
else:
assert isinstance(seed, (int, np.int))
self.rs = np.random.RandomState(seed)
self.max_samples_per_step = max_samples_per_step
self.max_reduction_pairs = max_reduction_pairs
self.force_final_action = force_final_action
def step(self, state: ProgramState):
raise NotImplementedError()
def solve(self, arr: GraphArray):
state: ProgramState = ProgramState(arr,
max_reduction_pairs=self.max_reduction_pairs,
force_final_action=self.force_final_action)
num_steps = 0
while True:
num_steps += 1
state, cost, is_done = self.step(state)
if is_done:
break
return state.arr
class BlockCyclicTS(TreeSearch):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
super().__init__(seed,
max_samples_per_step,
max_reduction_pairs,
force_final_action)
def step(self, state: ProgramState):
if len(state.tnode_map) == 0:
# We're done.
return state, state.objective(state.arr.cluster_state.resources), True
action = None
for tnode_id in state.tnode_map:
action = state.get_bc_action(state.tnode_map[tnode_id][0])[0]
break
curr_cost = state.commit_action(action)
return state, curr_cost, False
class RandomTS(TreeSearch):
def __init__(self,
seed: Union[int, np.random.RandomState] = 1337,
max_samples_per_step=None,
max_reduction_pairs=None,
force_final_action=True):
super().__init__(seed,
max_samples_per_step,
max_reduction_pairs,
force_final_action)
def sample_actions(self, state: ProgramState) -> list:
if self.max_samples_per_step is None:
return state.get_all_actions()
# Subsample a set of frontier nodes to try next.
tnode_ids = list(state.tnode_map.keys())
num_tnodes = len(tnode_ids)
if num_tnodes <= self.max_samples_per_step:
tnode_id_sample = tnode_ids
else:
idx_set = set()
tnode_id_sample = []
while len(idx_set) < self.max_samples_per_step:
i = self.rs.randint(0, num_tnodes)
if i not in idx_set:
idx_set.add(i)
tnode_id_sample.append(tnode_ids[i])
actions = []
for tnode_id in tnode_id_sample:
actions += state.tnode_map[tnode_id][1]
return actions
def step(self, state: ProgramState):
# Sampling slows things down because for some reason,
# the lowest cost computations are the sums, so
# an algorithm that finds local optima keeps | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at | random_line_split |
|
scan_run.pb.go | ,proto3" json:"start_time,omitempty"`
// The time at which the ScanRun reached termination state - that the ScanRun
// is either finished or stopped by user.
EndTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// The number of URLs crawled during this ScanRun. If the scan is in progress,
// the value represents the number of URLs crawled up to now.
UrlsCrawledCount int64 `protobuf:"varint,6,opt,name=urls_crawled_count,json=urlsCrawledCount,proto3" json:"urls_crawled_count,omitempty"`
// The number of URLs tested during this ScanRun. If the scan is in progress,
// the value represents the number of URLs tested up to now. The number of
// URLs tested is usually larger than the number URLS crawled because
// typically a crawled URL is tested with multiple test payloads.
UrlsTestedCount int64 `protobuf:"varint,7,opt,name=urls_tested_count,json=urlsTestedCount,proto3" json:"urls_tested_count,omitempty"`
// Whether the scan run has found any vulnerabilities.
HasVulnerabilities bool `protobuf:"varint,8,opt,name=has_vulnerabilities,json=hasVulnerabilities,proto3" json:"has_vulnerabilities,omitempty"`
// The percentage of total completion ranging from 0 to 100.
// If the scan is in queue, the value is 0.
// If the scan is running, the value ranges from 0 to 100.
// If the scan is finished, the value is 100.
ProgressPercent int32 `protobuf:"varint,9,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ScanRun) Reset() { *m = ScanRun{} }
func (m *ScanRun) String() string { return proto.CompactTextString(m) }
func (*ScanRun) ProtoMessage() {}
func (*ScanRun) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e91fc2897e59cf, []int{0}
}
func (m *ScanRun) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ScanRun.Unmarshal(m, b)
}
func (m *ScanRun) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ScanRun.Marshal(b, m, deterministic)
}
func (m *ScanRun) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScanRun.Merge(m, src)
}
func (m *ScanRun) XXX_Size() int {
return xxx_messageInfo_ScanRun.Size(m)
}
func (m *ScanRun) XXX_DiscardUnknown() {
xxx_messageInfo_ScanRun.DiscardUnknown(m)
}
var xxx_messageInfo_ScanRun proto.InternalMessageInfo
func (m *ScanRun) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ScanRun) GetExecutionState() ScanRun_ExecutionState {
if m != nil {
return m.ExecutionState
}
return ScanRun_EXECUTION_STATE_UNSPECIFIED
}
func (m *ScanRun) GetResultState() ScanRun_ResultState {
if m != nil {
return m.ResultState
}
return ScanRun_RESULT_STATE_UNSPECIFIED
}
func (m *ScanRun) GetStartTime() *timestamp.Timestamp {
if m != nil {
return m.StartTime
}
return nil
}
func (m *ScanRun) GetEndTime() *timestamp.Timestamp {
if m != nil |
return nil
}
func (m *ScanRun) GetUrlsCrawledCount() int64 {
if m != nil {
return m.UrlsCrawledCount
}
return 0
}
func (m *ScanRun) GetUrlsTestedCount() int64 {
if m != nil {
return m.UrlsTestedCount
}
return 0
}
func (m *ScanRun) GetHasVulnerabilities() bool {
if m != nil {
return m.HasVulnerabilities
}
return false
}
func (m *ScanRun) GetProgressPercent() int32 {
if m != nil {
return m.ProgressPercent
}
return 0
}
func init() {
proto.RegisterEnum("google.cloud.websecurityscanner.v1alpha.ScanRun_ExecutionState", ScanRun_ExecutionState_name, ScanRun_ExecutionState_value)
proto.RegisterEnum("google.cloud.websecurityscanner.v1alpha.ScanRun_ResultState", ScanRun_ResultState_name, ScanRun_ResultState_value)
proto.RegisterType((*ScanRun)(nil), "google.cloud.websecurityscanner.v1alpha.ScanRun")
}
func init() {
proto.RegisterFile("google/cloud/websecurityscanner/v1alpha/scan_run.proto", fileDescriptor_d1e91fc2897e59cf)
}
var fileDescriptor_d1e91fc2897e59cf = []byte{
// 604 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xed, 0x4e, 0xd4, 0x4c,
0x18, 0x7d, 0xcb, 0xc7, 0x7e, 0xcc, 0x12, 0xe8, 0x3b, 0xfe, 0xa9, 0x68, 0xc2, 0x86, 0x3f, 0x2e,
0x6a, 0xda, 0x88, 0xd1, 0xc4, 0x8f, 0x44, 0xa1, 0x14, 0x6d, 0x24, 0xcb, 0x3a, 0xdd, 0x35, 0xe2,
0x9f, 0x66, 0x76, 0x76, 0xe8, 0xd6, 0xb4, 0x33, 0xcd, 0x7c, 0x80, 0x86, 0x70, 0x1f, 0x5e, 0x85,
0x17, 0xe6, 0x55, 0x98, 0x4e, 0x5b, 0x91, 0x40, 0x02, 0xfe, 0xeb, 0x79, 0xce, 0x73, 0xce, 0x99,
0xcc, 0xf3, 0x4c, 0xc1, 0xf3, 0x84, 0xf3, 0x24, 0xa3, 0x1e, 0xc9, 0xb8, 0x9e, 0x79, 0xa7, 0x74,
0x2a, 0x29, 0xd1, 0x22, 0x55, 0xdf, 0x25, 0xc1, 0x8c, 0x51, 0xe1, 0x9d, 0x3c, 0xc1, 0x59, 0x31,
0xc7, 0x5e, 0x89, 0x63, 0xa1, 0x99, 0x5b, 0x08, 0xae, 0x38, 0x7c, 0x50, 0xe9, 0x5c, 0xa3, 0x73,
0xaf, 0xea, 0xdc, 0x5a, 0xb7, 0x7e, 0xb7, 0x0e, 0xc0, 0x45, 0xea, 0x09, 0x2a, 0xb9, 0x16, 0x84,
0x56, 0x1e, 0xeb, 0x1b, 0x35, 0x65, 0xd0, 0x54, 0x1f, 0x7b, 0x2a, 0xcd, 0xa9, 0x54, 0x38, 0x2f,
0xaa, | {
return m.EndTime
} | conditional_block |
scan_run.pb.go |
// A ScanRun is an output-only resource representing an actual run of the scan.
type ScanRun struct {
// The resource name of the ScanRun. The name follows the format of
// 'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}'.
// The ScanRun IDs are generated by the system.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The execution state of the ScanRun.
ExecutionState ScanRun_ExecutionState `protobuf:"varint,2,opt,name=execution_state,json=executionState,proto3,enum=google.cloud.websecurityscanner.v1alpha.ScanRun_ExecutionState" json:"execution_state,omitempty"`
// The result state of the ScanRun. This field is only available after the
// execution state reaches "FINISHED".
ResultState ScanRun_ResultState `protobuf:"varint,3,opt,name=result_state,json=resultState,proto3,enum=google.cloud.websecurityscanner.v1alpha.ScanRun_ResultState" json:"result_state,omitempty"`
// The time at which the ScanRun started.
StartTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// The time at which the ScanRun reached termination state - that the ScanRun
// is either finished or stopped by user.
EndTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// The number of URLs crawled during this ScanRun. If the scan is in progress,
// the value represents the number of URLs crawled up to now.
UrlsCrawledCount int64 `protobuf:"varint,6,opt,name=urls_crawled_count,json=urlsCrawledCount,proto3" json:"urls_crawled_count,omitempty"`
// The number of URLs tested during this ScanRun. If the scan is in progress,
// the value represents the number of URLs tested up to now. The number of
// URLs tested is usually larger than the number URLS crawled because
// typically a crawled URL is tested with multiple test payloads.
UrlsTestedCount int64 `protobuf:"varint,7,opt,name=urls_tested_count,json=urlsTestedCount,proto3" json:"urls_tested_count,omitempty"`
// Whether the scan run has found any vulnerabilities.
HasVulnerabilities bool `protobuf:"varint,8,opt,name=has_vulnerabilities,json=hasVulnerabilities,proto3" json:"has_vulnerabilities,omitempty"`
// The percentage of total completion ranging from 0 to 100.
// If the scan is in queue, the value is 0.
// If the scan is running, the value ranges from 0 to 100.
// If the scan is finished, the value is 100.
ProgressPercent int32 `protobuf:"varint,9,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ScanRun) Reset() { *m = ScanRun{} }
func (m *ScanRun) String() string { return proto.CompactTextString(m) }
func (*ScanRun) ProtoMessage() {}
func (*ScanRun) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e91fc2897e59cf, []int{0}
}
func (m *ScanRun) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ScanRun.Unmarshal(m, b)
}
func (m *ScanRun) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ScanRun.Marshal(b, m, deterministic)
}
func (m *ScanRun) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScanRun.Merge(m, src)
}
func (m *ScanRun) XXX_Size() int {
return xxx_messageInfo_ScanRun.Size(m)
}
func (m *ScanRun) XXX_DiscardUnknown() {
xxx_messageInfo_ScanRun.DiscardUnknown(m)
}
var xxx_messageInfo_ScanRun proto.InternalMessageInfo
func (m *ScanRun) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ScanRun) GetExecutionState() ScanRun_ExecutionState {
if m != nil {
return m.ExecutionState
}
return ScanRun_EXECUTION_STATE_UNSPECIFIED
}
func (m *ScanRun) GetResultState() ScanRun_ResultState {
if m != nil {
return m.ResultState
}
return ScanRun_RESULT_STATE_UNSPECIFIED
}
func (m *ScanRun) GetStartTime() *timestamp.Timestamp {
if m != nil {
return m.StartTime
}
return nil
}
func (m *ScanRun) GetEndTime() *timestamp.Timestamp {
if m != nil {
return m.EndTime
}
return nil
}
func (m *ScanRun) GetUrlsCrawledCount() int64 {
if m != nil {
return m.UrlsCrawledCount
}
return 0
}
func (m *ScanRun) GetUrlsTestedCount() int64 {
if m != nil {
return m.UrlsTestedCount
}
return 0
}
func (m *ScanRun) GetHasVulnerabilities() bool {
if m != nil {
return m.HasVulnerabilities
}
return false
}
func (m *ScanRun) GetProgressPercent() int32 {
if m != nil {
return m.ProgressPercent
}
return 0
}
func init() {
proto.RegisterEnum("google.cloud.websecurityscanner.v1alpha.ScanRun_ExecutionState", ScanRun_ExecutionState_name, ScanRun_ExecutionState_value)
proto.RegisterEnum("google.cloud.websecurityscanner.v1alpha.ScanRun_ResultState", ScanRun_ResultState_name, ScanRun_ResultState_value)
proto.RegisterType((*ScanRun)(nil), "google.cloud.websecurityscanner.v1alpha.ScanRun")
}
func init() {
proto.RegisterFile("google/cloud/websecurityscanner/v1alpha/scan_run.proto", fileDescriptor_d1e91fc2897e59cf)
}
var fileDescriptor_d1e91fc2897e59cf = []byte{
// 604 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xed, 0x4e, 0xd4, 0x4c,
0x18, 0x7d, 0xcb, 0xc7, 0x7e, 0xcc, 0x12, 0xe8, 0x3b, 0xfe, 0xa9, 0x68, 0xc2, 0x86, 0x3f, 0x2e,
0x6a, 0xda, 0x88, 0xd1, 0xc4, 0x8f, 0x44, 0xa1, 0x14, 0x6d, 0x24, 0xcb, 0x3a, 0xdd, 0x35, 0xe2,
0x9f, 0x66, 0x76, 0x76, 0xe8, 0xd6, 0xb4, 0x33, 0xcd, 0x7c, 0x80, 0x86, 0x70, 0x1f, 0x5e, 0x85,
0x17, 0xe6, 0x55, 0x98, 0x4e, 0x5b, 0x91, 0x40, 0x02, 0xfe, 0xeb, 0x79, 0xce, 0x73, 0xce, 0x99,
0xcc, 0xf3, 0x4c, 0xc1, 0xf3, 0x84, 0xf3, 0x24, 0xa3, 0x1e, 0xc9, 0xb8, 0x9e, 0x79, 0xa7, 0x74,
0x2a, 0x29, 0xd1, 0x22, 0x55, 0xdf, 0x25, 0xc1, 0x8c, 0x51, 0xe1, 0x9d, 0x3c, 0xc | {
return fileDescriptor_d1e91fc2897e59cf, []int{0, 1}
} | identifier_body |
|
scan_run.pb.go | // The time at which the ScanRun started.
StartTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// The time at which the ScanRun reached termination state - that the ScanRun
// is either finished or stopped by user.
EndTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// The number of URLs crawled during this ScanRun. If the scan is in progress,
// the value represents the number of URLs crawled up to now.
UrlsCrawledCount int64 `protobuf:"varint,6,opt,name=urls_crawled_count,json=urlsCrawledCount,proto3" json:"urls_crawled_count,omitempty"`
// The number of URLs tested during this ScanRun. If the scan is in progress,
// the value represents the number of URLs tested up to now. The number of
// URLs tested is usually larger than the number URLS crawled because
// typically a crawled URL is tested with multiple test payloads.
UrlsTestedCount int64 `protobuf:"varint,7,opt,name=urls_tested_count,json=urlsTestedCount,proto3" json:"urls_tested_count,omitempty"`
// Whether the scan run has found any vulnerabilities.
HasVulnerabilities bool `protobuf:"varint,8,opt,name=has_vulnerabilities,json=hasVulnerabilities,proto3" json:"has_vulnerabilities,omitempty"`
// The percentage of total completion ranging from 0 to 100.
// If the scan is in queue, the value is 0.
// If the scan is running, the value ranges from 0 to 100.
// If the scan is finished, the value is 100.
ProgressPercent int32 `protobuf:"varint,9,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ScanRun) Reset() { *m = ScanRun{} }
func (m *ScanRun) String() string { return proto.CompactTextString(m) }
func (*ScanRun) ProtoMessage() {}
func (*ScanRun) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e91fc2897e59cf, []int{0}
}
func (m *ScanRun) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ScanRun.Unmarshal(m, b)
}
func (m *ScanRun) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ScanRun.Marshal(b, m, deterministic)
}
func (m *ScanRun) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScanRun.Merge(m, src)
}
func (m *ScanRun) XXX_Size() int {
return xxx_messageInfo_ScanRun.Size(m)
}
func (m *ScanRun) XXX_DiscardUnknown() {
xxx_messageInfo_ScanRun.DiscardUnknown(m)
}
var xxx_messageInfo_ScanRun proto.InternalMessageInfo
func (m *ScanRun) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ScanRun) GetExecutionState() ScanRun_ExecutionState {
if m != nil {
return m.ExecutionState
}
return ScanRun_EXECUTION_STATE_UNSPECIFIED
}
func (m *ScanRun) GetResultState() ScanRun_ResultState {
if m != nil {
return m.ResultState
}
return ScanRun_RESULT_STATE_UNSPECIFIED
}
func (m *ScanRun) GetStartTime() *timestamp.Timestamp {
if m != nil {
return m.StartTime
}
return nil
}
func (m *ScanRun) GetEndTime() *timestamp.Timestamp {
if m != nil {
return m.EndTime
}
return nil
}
func (m *ScanRun) GetUrlsCrawledCount() int64 {
if m != nil {
return m.UrlsCrawledCount
}
return 0
}
func (m *ScanRun) GetUrlsTestedCount() int64 {
if m != nil {
return m.UrlsTestedCount
}
return 0
}
func (m *ScanRun) GetHasVulnerabilities() bool {
if m != nil {
return m.HasVulnerabilities
}
return false
}
func (m *ScanRun) GetProgressPercent() int32 {
if m != nil {
return m.ProgressPercent
}
return 0
}
func init() {
proto.RegisterEnum("google.cloud.websecurityscanner.v1alpha.ScanRun_ExecutionState", ScanRun_ExecutionState_name, ScanRun_ExecutionState_value)
proto.RegisterEnum("google.cloud.websecurityscanner.v1alpha.ScanRun_ResultState", ScanRun_ResultState_name, ScanRun_ResultState_value)
proto.RegisterType((*ScanRun)(nil), "google.cloud.websecurityscanner.v1alpha.ScanRun")
}
func init() {
proto.RegisterFile("google/cloud/websecurityscanner/v1alpha/scan_run.proto", fileDescriptor_d1e91fc2897e59cf)
}
var fileDescriptor_d1e91fc2897e59cf = []byte{
// 604 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xed, 0x4e, 0xd4, 0x4c,
0x18, 0x7d, 0xcb, 0xc7, 0x7e, 0xcc, 0x12, 0xe8, 0x3b, 0xfe, 0xa9, 0x68, 0xc2, 0x86, 0x3f, 0x2e,
0x6a, 0xda, 0x88, 0xd1, 0xc4, 0x8f, 0x44, 0xa1, 0x14, 0x6d, 0x24, 0xcb, 0x3a, 0xdd, 0x35, 0xe2,
0x9f, 0x66, 0x76, 0x76, 0xe8, 0xd6, 0xb4, 0x33, 0xcd, 0x7c, 0x80, 0x86, 0x70, 0x1f, 0x5e, 0x85,
0x17, 0xe6, 0x55, 0x98, 0x4e, 0x5b, 0x91, 0x40, 0x02, 0xfe, 0xeb, 0x79, 0xce, 0x73, 0xce, 0x99,
0xcc, 0xf3, 0x4c, 0xc1, 0xf3, 0x84, 0xf3, 0x24, 0xa3, 0x1e, 0xc9, 0xb8, 0x9e, 0x79, 0xa7, 0x74,
0x2a, 0x29, 0xd1, 0x22, 0x55, 0xdf, 0x25, 0xc1, 0x8c, 0x51, 0xe1, 0x9d, 0x3c, 0xc1, 0x59, 0x31,
0xc7, 0x5e, 0x89, 0x63, 0xa1, 0x99, 0x5b, 0x08, 0xae, 0x38, 0x7c, 0x50, 0xe9, 0x5c, 0xa3, 0x73,
0xaf, 0xea, 0xdc, 0x5a, 0xb7, 0x7e, 0xb7, 0x0e, 0xc0, 0x45, 0xea, 0x09, 0x2a, 0xb9, 0x16, 0x84,
0x56, 0x1e, 0xeb, 0x1b, 0x35, 0x65, 0xd0, 0x54, 0x1f, 0x7b, 0x2a, 0xcd, | random_line_split |
||
scan_run.pb.go | ,proto3" json:"start_time,omitempty"`
// The time at which the ScanRun reached termination state - that the ScanRun
// is either finished or stopped by user.
EndTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// The number of URLs crawled during this ScanRun. If the scan is in progress,
// the value represents the number of URLs crawled up to now.
UrlsCrawledCount int64 `protobuf:"varint,6,opt,name=urls_crawled_count,json=urlsCrawledCount,proto3" json:"urls_crawled_count,omitempty"`
// The number of URLs tested during this ScanRun. If the scan is in progress,
// the value represents the number of URLs tested up to now. The number of
// URLs tested is usually larger than the number URLS crawled because
// typically a crawled URL is tested with multiple test payloads.
UrlsTestedCount int64 `protobuf:"varint,7,opt,name=urls_tested_count,json=urlsTestedCount,proto3" json:"urls_tested_count,omitempty"`
// Whether the scan run has found any vulnerabilities.
HasVulnerabilities bool `protobuf:"varint,8,opt,name=has_vulnerabilities,json=hasVulnerabilities,proto3" json:"has_vulnerabilities,omitempty"`
// The percentage of total completion ranging from 0 to 100.
// If the scan is in queue, the value is 0.
// If the scan is running, the value ranges from 0 to 100.
// If the scan is finished, the value is 100.
ProgressPercent int32 `protobuf:"varint,9,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ScanRun) Reset() { *m = ScanRun{} }
func (m *ScanRun) String() string { return proto.CompactTextString(m) }
func (*ScanRun) ProtoMessage() {}
func (*ScanRun) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e91fc2897e59cf, []int{0}
}
func (m *ScanRun) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ScanRun.Unmarshal(m, b)
}
func (m *ScanRun) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ScanRun.Marshal(b, m, deterministic)
}
func (m *ScanRun) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScanRun.Merge(m, src)
}
func (m *ScanRun) XXX_Size() int {
return xxx_messageInfo_ScanRun.Size(m)
}
func (m *ScanRun) XXX_DiscardUnknown() {
xxx_messageInfo_ScanRun.DiscardUnknown(m)
}
var xxx_messageInfo_ScanRun proto.InternalMessageInfo
func (m *ScanRun) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ScanRun) GetExecutionState() ScanRun_ExecutionState {
if m != nil {
return m.ExecutionState
}
return ScanRun_EXECUTION_STATE_UNSPECIFIED
}
func (m *ScanRun) | () ScanRun_ResultState {
if m != nil {
return m.ResultState
}
return ScanRun_RESULT_STATE_UNSPECIFIED
}
func (m *ScanRun) GetStartTime() *timestamp.Timestamp {
if m != nil {
return m.StartTime
}
return nil
}
func (m *ScanRun) GetEndTime() *timestamp.Timestamp {
if m != nil {
return m.EndTime
}
return nil
}
func (m *ScanRun) GetUrlsCrawledCount() int64 {
if m != nil {
return m.UrlsCrawledCount
}
return 0
}
func (m *ScanRun) GetUrlsTestedCount() int64 {
if m != nil {
return m.UrlsTestedCount
}
return 0
}
func (m *ScanRun) GetHasVulnerabilities() bool {
if m != nil {
return m.HasVulnerabilities
}
return false
}
func (m *ScanRun) GetProgressPercent() int32 {
if m != nil {
return m.ProgressPercent
}
return 0
}
func init() {
proto.RegisterEnum("google.cloud.websecurityscanner.v1alpha.ScanRun_ExecutionState", ScanRun_ExecutionState_name, ScanRun_ExecutionState_value)
proto.RegisterEnum("google.cloud.websecurityscanner.v1alpha.ScanRun_ResultState", ScanRun_ResultState_name, ScanRun_ResultState_value)
proto.RegisterType((*ScanRun)(nil), "google.cloud.websecurityscanner.v1alpha.ScanRun")
}
func init() {
proto.RegisterFile("google/cloud/websecurityscanner/v1alpha/scan_run.proto", fileDescriptor_d1e91fc2897e59cf)
}
var fileDescriptor_d1e91fc2897e59cf = []byte{
// 604 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xed, 0x4e, 0xd4, 0x4c,
0x18, 0x7d, 0xcb, 0xc7, 0x7e, 0xcc, 0x12, 0xe8, 0x3b, 0xfe, 0xa9, 0x68, 0xc2, 0x86, 0x3f, 0x2e,
0x6a, 0xda, 0x88, 0xd1, 0xc4, 0x8f, 0x44, 0xa1, 0x14, 0x6d, 0x24, 0xcb, 0x3a, 0xdd, 0x35, 0xe2,
0x9f, 0x66, 0x76, 0x76, 0xe8, 0xd6, 0xb4, 0x33, 0xcd, 0x7c, 0x80, 0x86, 0x70, 0x1f, 0x5e, 0x85,
0x17, 0xe6, 0x55, 0x98, 0x4e, 0x5b, 0x91, 0x40, 0x02, 0xfe, 0xeb, 0x79, 0xce, 0x73, 0xce, 0x99,
0xcc, 0xf3, 0x4c, 0xc1, 0xf3, 0x84, 0xf3, 0x24, 0xa3, 0x1e, 0xc9, 0xb8, 0x9e, 0x79, 0xa7, 0x74,
0x2a, 0x29, 0xd1, 0x22, 0x55, 0xdf, 0x25, 0xc1, 0x8c, 0x51, 0xe1, 0x9d, 0x3c, 0xc1, 0x59, 0x31,
0xc7, 0x5e, 0x89, 0x63, 0xa1, 0x99, 0x5b, 0x08, 0xae, 0x38, 0x7c, 0x50, 0xe9, 0x5c, 0xa3, 0x73,
0xaf, 0xea, 0xdc, 0x5a, 0xb7, 0x7e, 0xb7, 0x0e, 0xc0, 0x45, 0xea, 0x09, 0x2a, 0xb9, 0x16, 0x84,
0x56, 0x1e, 0xeb, 0x1b, 0x35, 0x65, 0xd0, 0x54, 0x1f, 0x7b, 0x2a, 0xcd, 0xa9, 0x54, 0x38, 0x2f,
0xaa, | GetResultState | identifier_name |
onnx_helpers.py | : enable constant-folding optimization
# * input_names: setup input names as a list of string
# * output_names: setup output names as a list of string
# * opset_version: opset version of ONNX model. Latest one is recommended.
# * operator_export_type:
# * OperatorExportTypes.ONNX: normal mode
# * OperatorExportTypes.ONNX_ATEN_FALLBACK: check 'ATen' node in debug mode
# * dynamic_axes: define dynamic dimension inputs
torch.onnx.export(self.net,
args=(_image, _points),
f=export_path,
export_params=True,
do_constant_folding=True,
verbose=True,
input_names=input_names,
output_names=output_names,
opset_version=12,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
dynamic_axes=dynamic_axes)
@staticmethod
def check_onnx_model(onnx_path):
"""Check ONNX model if it is legit.
Args:
onnx_path (str): ONNX model path
"""
# Load the ONNX model
model = onnx.load(onnx_path)
# Check that the IR is well formed
onnx.checker.check_model(model)
# Print a human readable representation of the graph
onnx.helper.printable_graph(model.graph)
def init_ort_session(self, onnx_path):
"""Initialize ONNX Runtime session
Args:
onnx_path (str): ONNX model path
"""
# Setup options for optimization
sess_options = ort.SessionOptions()
sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
self.ort_session = ort.InferenceSession(onnx_path,
sess_options=sess_options)
def inference_ort(self, image, points):
"""Inference with ONNX Runtime session
Args:
image (np.array): processed image array
points (np.array): processed points array
Returns:
np.array: probability array from model output
"""
outputs = self.ort_session.run(None,
{'image': image.astype(np.float32),
'points': points.astype(np.int32)})
return outputs
class ImageHelper:
def __init__(self, image, input_size) -> None:
"""Initialize image helper for processing image.
Args:
image ([str, np.array]): input image path or array
input_size (tuple): input size of model
"""
self.input_image = image
if isinstance(image, str):
self.input_image = self._load_image(image)
self.orig_size = self.input_image.shape[:2]
self.input_size = input_size
# 'Resize' is TRUE if input_size is not equal to image size (orig_size).
self.resize = True if self.input_size != self.orig_size else False
self._image_nd = self._preprocessing(self.input_image)
@property
def image_nd(self):
return self._image_nd
@property
def input_shape(self):
return self.input_image.shape
def _load_image(self, img_path):
if not os.path.isfile(img_path):
raise FileNotFoundError(f"{img_path}")
image = cv2.imread(img_path, -1)
hei, wid, channel = image.shape
self.orig_size = (hei, wid)
assert channel == 3, "Channel of input image is not 3."
image = cv2.cvtColor(image,
cv2.COLOR_BGR2RGB)
return image
@staticmethod
def _np_resize_image(image, size, dtype='int'):
"""Resize image for np.array
NOTE:
* Resize results from cv2 and skimage differ slightly, so skimage is used as a workaround when resizing float-typed images.
Args:
image (np.array): image array
size (tuple): input size of model
dtype (str, optional): data type of image. Defaults to 'int'.
Raises:
NotImplementedError: dtype is allowed 'int' or 'float' only.
Returns:
np.array: resized image
"""
if dtype == 'int':
_size = (size[1], size[0]) # (H,W) to (W,H)
return cv2.resize(image.astype('uint8'),
_size,
interpolation=cv2.INTER_LINEAR)
elif dtype == 'float':
return skresize(image,
size,
order=0,
mode='constant',
preserve_range=True)
else:
raise NotImplementedError(f"'{dtype}' is not a valid dtype.")
@staticmethod
def _np_transpose(image):
"""Transpose array dimension from (H,W,C) to (C,H,W)
Args:
image (np.array): image array
Returns:
np.array: resized image array
"""
return np.transpose(image, (2, 0, 1))
@staticmethod
def _np_normalize(image):
"""Normalize image array
Args:
image (np.array): image array
Returns:
np.array: normalized image array
"""
_image = image / 255.0
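# The mean/std values below are the standard ImageNet normalization constants.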
_mean = np.array([[[.485]], [[.456]], [[.406]]]) # shape: (3, 1, 1)
_std = np.array([[[.229]], [[.224]], [[.225]]])
_image = (_image - _mean) / _std
return _image
@staticmethod
def _np_flip_n_cat(image):
"""Horizontal flipping and concatenation for model input
Args:
image (np.array): image array
Returns:
np.array: result array
"""
image_flip = np.flip(image, (2)) # flip the channel 2: width
_image = np.expand_dims(image, axis=0)
image_flip = np.expand_dims(image_flip, axis=0)
return np.concatenate((_image, image_flip), axis=0)
def _preprocessing(self, input_image):
"""Preprocess image for model input
Args:
input_image (np.array): input image
Returns:
np.array: preprocessed image
"""
if self.resize:
input_image = self._np_resize_image(input_image,
self.input_size,
dtype='int')
image = self._np_transpose(input_image)
image = self._np_normalize(image)
image = self._np_flip_n_cat(image)
return image
@staticmethod
def _np_sigmoid(prediction):
"""Sigmoid function for activation
NOTE:
* [WARN] Numerical stability is not handled.
Args:
prediction (np.array): predicted output
Returns:
np.array: probability map after activation
"""
x = prediction
prob_map = 1 / (1 + np.exp(-x))
return prob_map
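# NOTE: a numerically stable variant (a sketch, not what this helper currently does) would be:
#   np.where(x >= 0, 1.0 / (1.0 + np.exp(-x)), np.exp(x) / (1.0 + np.exp(x)))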
@staticmethod
def | (prediction):
"""Merge two layers output into one.
Args:
prediction (np.array): probability map with 2 layers
Returns:
np.array: single layer probability map
"""
prob_map = prediction[0][0]
prob_map_flipped = np.flip(prediction[1][0], (1)) # (H, W)
_prob = 0.5 * (prob_map + prob_map_flipped)
return _prob
@staticmethod
def _np_get_mask(prob_map, prob_thresh=0.5):
"""Binarize probability map into mask.
Args:
prob_map (np.array): probability map which range is [0, 1].
prob_thresh (float, optional): probability threshold. Defaults to 0.5.
Returns:
np.array: mask which range is [0, 255].
"""
mask = (prob_map > prob_thresh) * 255
return mask.astype(np.uint8)
def postprocessing(self, prediction, prob_thresh=0.5):
"""Post-process for model output
Args:
prediction (np.array): predicted result from model output
prob_thresh (float, optional): probability threshold. Defaults to 0.5.
Returns:
np.array: mask
"""
prob_map = self._np_sigmoid(prediction)
prob_map = self._np_merge_prediction(prob_map)
if self.resize :
prob_map = self._np_resize_image(prob_map,
self.orig_size,
dtype='float')
mask = self._np_get_mask(prob_map, prob_thresh=prob_thresh)
return mask
class PointsHelper:
def __init__(self,
click_list,
image_width,
input_size,
orig_size,
resize=False,
net_clicks_limit=20) -> None:
"""Initialize points helper for processing user clicks.
Args:
click_list (list): a list of list with shape (n, 3)
image_width (int): image width
input_size (tuple): (height, width)
orig_size (tuple): (height, width)
resize (bool, optional): flag to resize. Defaults to False.
net_clicks_limit (int, optional): limitation to the number of clicks. Defaults to 20.
"""
self.click_list = click_list
self.image_width = image_width
self.net_clicks_limit = net_clicks_limit
self.input_size = input_size
self | _np_merge_prediction | identifier_name |
onnx_helpers.py | olding: enable constant-folding optimization
# * input_names: setup input names as a list of string
# * output_names: setup output names as a list of string
# * opset_version: opset version of ONNX model. Latest one is recommended.
# * operator_export_type:
# * OperatorExportTypes.ONNX: normal mode
# * OperatorExportTypes.ONNX_ATEN_FALLBACK: check 'ATen' node in debug mode
# * dynamic_axes: define dynamic dimension inputs
torch.onnx.export(self.net,
args=(_image, _points),
f=export_path,
export_params=True,
do_constant_folding=True,
verbose=True,
input_names=input_names,
output_names=output_names,
opset_version=12,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
dynamic_axes=dynamic_axes)
@staticmethod
def check_onnx_model(onnx_path):
"""Check ONNX model if it is legit.
Args:
onnx_path (str): ONNX model path
"""
# Load the ONNX model
model = onnx.load(onnx_path)
# Check that the IR is well formed
onnx.checker.check_model(model)
# Print a human readable representation of the graph
onnx.helper.printable_graph(model.graph)
def init_ort_session(self, onnx_path):
"""Initialize ONNX Runtime session
Args:
onnx_path (str): ONNX model path
""" | self.ort_session = ort.InferenceSession(onnx_path,
sess_options=sess_options)
def inference_ort(self, image, points):
"""Inference with ONNX Runtime session
Args:
image (np.array): processed image array
points (np.array): processed points array
Returns:
np.array: probability array from model output
"""
outputs = self.ort_session.run(None,
{'image': image.astype(np.float32),
'points': points.astype(np.int32)})
return outputs
class ImageHelper:
def __init__(self, image, input_size) -> None:
"""Initialize image helper for processing image.
Args:
image ([str, np.array]): input image path or array
input_size (tuple): input size of model
"""
self.input_image = image
if isinstance(image, str):
self.input_image = self._load_image(image)
self.orig_size = self.input_image.shape[:2]
self.input_size = input_size
# 'Resize' is TRUE if input_size is not equal to image size (orig_size).
self.resize = True if self.input_size != self.orig_size else False
self._image_nd = self._preprocessing(self.input_image)
@property
def image_nd(self):
return self._image_nd
@property
def input_shape(self):
return self.input_image.shape
def _load_image(self, img_path):
if not os.path.isfile(img_path):
raise FileNotFoundError(f"{img_path}")
image = cv2.imread(img_path, -1)
hei, wid, channel = image.shape
self.orig_size = (hei, wid)
assert channel == 3, "Channel of input image is not 3."
image = cv2.cvtColor(image,
cv2.COLOR_BGR2RGB)
return image
@staticmethod
def _np_resize_image(image, size, dtype='int'):
"""Resize image for np.array
NOTE:
* Resize results from cv2 and skimage differ slightly, so skimage is used as a workaround when resizing float-typed images.
Args:
image (np.array): image array
size (tuple): input size of model
dtype (str, optional): data type of image. Defaults to 'int'.
Raises:
NotImplementedError: dtype is allowed 'int' or 'float' only.
Returns:
np.array: resized image
"""
if dtype == 'int':
_size = (size[1], size[0]) # (H,W) to (W,H)
return cv2.resize(image.astype('uint8'),
_size,
interpolation=cv2.INTER_LINEAR)
elif dtype == 'float':
return skresize(image,
size,
order=0,
mode='constant',
preserve_range=True)
else:
raise NotImplementedError(f"'{dtype}' is not a valid dtype.")
@staticmethod
def _np_transpose(image):
"""Transpose array dimension from (H,W,C) to (C,H,W)
Args:
image (np.array): image array
Returns:
np.array: resized image array
"""
return np.transpose(image, (2, 0, 1))
@staticmethod
def _np_normalize(image):
"""Normalize image array
Args:
image (np.array): image array
Returns:
np.array: normalized image array
"""
_image = image / 255.0
_mean = np.array([[[.485]], [[.456]], [[.406]]]) # shape: (3, 1, 1)
_std = np.array([[[.229]], [[.224]], [[.225]]])
_image = (_image - _mean) / _std
return _image
@staticmethod
def _np_flip_n_cat(image):
"""Horizontal flipping and concatenation for model input
Args:
image (np.array): image array
Returns:
np.array: result array
"""
image_flip = np.flip(image, (2)) # flip the channel 2: width
_image = np.expand_dims(image, axis=0)
image_flip = np.expand_dims(image_flip, axis=0)
return np.concatenate((_image, image_flip), axis=0)
def _preprocessing(self, input_image):
"""Preprocess image for model input
Args:
input_image (np.array): input image
Returns:
np.array: preprocessed image
"""
if self.resize:
input_image = self._np_resize_image(input_image,
self.input_size,
dtype='int')
image = self._np_transpose(input_image)
image = self._np_normalize(image)
image = self._np_flip_n_cat(image)
return image
@staticmethod
def _np_sigmoid(prediction):
"""Sigmoid function for activation
NOTE:
* [WARN] Numerical stability is not handled.
Args:
prediction (np.array): predicted output
Returns:
np.array: probability map after activation
"""
x = prediction
prob_map = 1 / (1 + np.exp(-x))
return prob_map
@staticmethod
def _np_merge_prediction(prediction):
"""Merge two layers output into one.
Args:
prediction (np.array): probability map with 2 layers
Returns:
np.array: single layer probability map
"""
prob_map = prediction[0][0]
prob_map_flipped = np.flip(prediction[1][0], (1)) # (H, W)
_prob = 0.5 * (prob_map + prob_map_flipped)
return _prob
@staticmethod
def _np_get_mask(prob_map, prob_thresh=0.5):
"""Binarize probability map into mask.
Args:
prob_map (np.array): probability map which range is [0, 1].
prob_thresh (float, optional): probability threshold. Defaults to 0.5.
Returns:
np.array: mask which range is [0, 255].
"""
mask = (prob_map > prob_thresh) * 255
return mask.astype(np.uint8)
def postprocessing(self, prediction, prob_thresh=0.5):
"""Post-process for model output
Args:
prediction (np.array): predicted result from model output
prob_thresh (float, optional): probability threshold. Defaults to 0.5.
Returns:
np.array: mask
"""
prob_map = self._np_sigmoid(prediction)
prob_map = self._np_merge_prediction(prob_map)
if self.resize :
prob_map = self._np_resize_image(prob_map,
self.orig_size,
dtype='float')
mask = self._np_get_mask(prob_map, prob_thresh=prob_thresh)
return mask
class PointsHelper:
def __init__(self,
click_list,
image_width,
input_size,
orig_size,
resize=False,
net_clicks_limit=20) -> None:
"""Initialize points helper for processing user clicks.
Args:
click_list (list): a list of list with shape (n, 3)
image_width (int): image width
input_size (tuple): (height, width)
orig_size (tuple): (height, width)
resize (bool, optional): flag to resize. Defaults to False.
net_clicks_limit (int, optional): limitation to the number of clicks. Defaults to 20.
"""
self.click_list = click_list
self.image_width = image_width
self.net_clicks_limit = net_clicks_limit
self.input_size = input_size
| # Setup options for optimization
sess_options = ort.SessionOptions()
sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
| random_line_split |
onnx_helpers.py | ) / _std
return _image
@staticmethod
def _np_flip_n_cat(image):
"""Horizontal flipping and concatenation for model input
Args:
image (np.array): image array
Returns:
np.array: result array
"""
image_flip = np.flip(image, (2)) # flip the channel 2: width
_image = np.expand_dims(image, axis=0)
image_flip = np.expand_dims(image_flip, axis=0)
return np.concatenate((_image, image_flip), axis=0)
def _preprocessing(self, input_image):
"""Preprocess image for model input
Args:
input_image (np.array): input image
Returns:
np.array: preprocessed image
"""
if self.resize:
input_image = self._np_resize_image(input_image,
self.input_size,
dtype='int')
image = self._np_transpose(input_image)
image = self._np_normalize(image)
image = self._np_flip_n_cat(image)
return image
@staticmethod
def _np_sigmoid(prediction):
"""Sigmoid function for activation
NOTE:
* [WARN] Numerical stability is not handled.
Args:
prediction (np.array): predicted output
Returns:
np.array: probability map after activation
"""
x = prediction
prob_map = 1 / (1 + np.exp(-x))
return prob_map
@staticmethod
def _np_merge_prediction(prediction):
"""Merge two layers output into one.
Args:
prediction (np.array): probability map with 2 layers
Returns:
np.array: single layer probability map
"""
prob_map = prediction[0][0]
prob_map_flipped = np.flip(prediction[1][0], (1)) # (H, W)
_prob = 0.5 * (prob_map + prob_map_flipped)
return _prob
@staticmethod
def _np_get_mask(prob_map, prob_thresh=0.5):
"""Binarize probability map into mask.
Args:
prob_map (np.array): probability map which range is [0, 1].
prob_thresh (float, optional): probability threshold. Defaults to 0.5.
Returns:
np.array: mask which range is [0, 255].
"""
mask = (prob_map > prob_thresh) * 255
return mask.astype(np.uint8)
def postprocessing(self, prediction, prob_thresh=0.5):
"""Post-process for model output
Args:
prediction (np.array): predicted result from model output
prob_thresh (float, optional): probability threshold. Defaults to 0.5.
Returns:
np.array: mask
"""
prob_map = self._np_sigmoid(prediction)
prob_map = self._np_merge_prediction(prob_map)
if self.resize :
prob_map = self._np_resize_image(prob_map,
self.orig_size,
dtype='float')
mask = self._np_get_mask(prob_map, prob_thresh=prob_thresh)
return mask
class PointsHelper:
def __init__(self,
click_list,
image_width,
input_size,
orig_size,
resize=False,
net_clicks_limit=20) -> None:
"""Initialize points helper for processing user clicks.
Args:
click_list (list): a list of list with shape (n, 3)
image_width (int): image width
input_size (tuple): (height, width)
orig_size (tuple): (height, width)
resize (bool, optional): flag to resize. Defaults to False.
net_clicks_limit (int, optional): limitation to the number of clicks. Defaults to 20.
"""
self.click_list = click_list
self.image_width = image_width
self.net_clicks_limit = net_clicks_limit
self.input_size = input_size
self.orig_size = orig_size
self.resize = resize
if self.resize:
self.image_width = self.input_size[1]
self._points_nd = self._preprocessing()
@property
def points_nd(self):
return self._points_nd
@staticmethod
def _get_points_nd(clicks_lists, net_clicks_limit):
"""Generate specific format of points array.
Args:
clicks_lists (List[List]): clicks_lists with raw and flipped clicks.
net_clicks_limit (int): limitation to the number of clicks.
Returns:
np.array: specific format of points array with some (-1, -1) filling points.
"""
total_clicks = []
num_pos_clicks = [sum(x.is_positive for x in clicks_list) for clicks_list in clicks_lists]
num_neg_clicks = [len(clicks_list) - num_pos for clicks_list, num_pos in zip(clicks_lists, num_pos_clicks)]
num_max_points = max(num_pos_clicks + num_neg_clicks)
if net_clicks_limit is not None:
num_max_points = min(net_clicks_limit, num_max_points)
num_max_points = max(1, num_max_points)
for clicks_list in clicks_lists:
clicks_list = clicks_list[:net_clicks_limit]
pos_clicks = [click.coords for click in clicks_list if click.is_positive]
pos_clicks = pos_clicks + (num_max_points - len(pos_clicks)) * [(-1, -1)]
neg_clicks = [click.coords for click in clicks_list if not click.is_positive]
neg_clicks = neg_clicks + (num_max_points - len(neg_clicks)) * [(-1, -1)]
total_clicks.append(pos_clicks + neg_clicks)
return np.array(total_clicks)
@staticmethod
def _points_transform(clicks_lists, image_width):
"""Transform original points list into flipped points and concatenate these two list.
Args:
clicks_lists (List[List]): clicks list. (Ex: [clicks_lists])
image_width (int): image width for flipping
Returns:
List[List]: clicks list with re-formating. (Ex: [clicks_lists, clicks_lists_flipped])
"""
clicks_lists_flipped = []
for clicks_list in clicks_lists:
clicks_list_flipped = []
for click in clicks_list:
# Horizontal flip
_y = image_width - click.coords[1] - 1
_click = clicker.Click(is_positive=click.is_positive,
coords=(click.coords[0], _y))
clicks_list_flipped.append(_click)
clicks_lists_flipped.append(clicks_list_flipped)
clicks_lists = clicks_lists + clicks_lists_flipped
return clicks_lists
@staticmethod
def _get_clickers(click_list):
"""Wrap clicks by 'Clicker' class
Args:
click_list (List[List]): user click list
Returns:
Clicker: clicker object
"""
clickers = clicker.Clicker()
for _click in click_list:
click = clicker.Click(is_positive=_click[2],
coords=(_click[1], _click[0])) # (x, y)
clickers.add_click(click)
return clickers
@staticmethod
def _remapping_coord(click_list, input_size, orig_size):
"""Remap the coordinate if flag of resize is TRUE.
Args:
click_list (List[List]): user click list with shape (n, 3)
input_size (tuple): input size of model (H, W)
orig_size (tuple): original image size (H, W)
Returns:
List[List]: click list after coordinate remapping
"""
input_coord = [input_size[1], input_size[0], 1]
orig_coord = [orig_size[1], orig_size[0], 1]
_click_list = list()
for click in click_list:
click = list(map(truediv, click, orig_coord))
click = list(map(mul, click, input_coord))
click = list(map(int, click))
_click_list.append(click)
return _click_list
def _preprocessing(self):
"""Pre-processing the user clicks to points array
Returns:
np.array: points array for model input
"""
if self.resize:
self.click_list = self._remapping_coord(self.click_list,
self.input_size,
self.orig_size)
clickers = self._get_clickers(self.click_list)
clicks_list = clickers.get_clicks()
clicks_lists = self._points_transform([clicks_list], self.image_width)
points_nd = self._get_points_nd(clicks_lists, self.net_clicks_limit)
return points_nd
def onnx_interface(img_np, click_list, onnx_handler, cfg):
| """ONNX interface contained main flow
Args:
img_np (np.array): image array in RBG-format
click_list (List[List]): user click list
onnx_handler (ONNXHandler): ONNX handler
cfg (dict): config
Returns:
np.array: mask array with original image shape (H, W)
"""
# [pre-processing][image]
img_helper = ImageHelper(img_np,
input_size=cfg['input_size'])
# Update config for PointsHelper
cfg['orig_size'] = img_helper.orig_size
cfg['resize'] = img_helper.resize
cfg['image_width'] = img_helper.input_shape[1]
| identifier_body |
|
onnx_helpers.py | : enable constant-folding optimization
# * input_names: setup input names as a list of string
# * output_names: setup output names as a list of string
# * opset_version: opset version of ONNX model. Latest one is recommended.
# * operator_export_type:
# * OperatorExportTypes.ONNX: normal mode
# * OperatorExportTypes.ONNX_ATEN_FALLBACK: check 'ATen' node in debug mode
# * dynamic_axes: define dynamic dimension inputs
torch.onnx.export(self.net,
args=(_image, _points),
f=export_path,
export_params=True,
do_constant_folding=True,
verbose=True,
input_names=input_names,
output_names=output_names,
opset_version=12,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
dynamic_axes=dynamic_axes)
@staticmethod
def check_onnx_model(onnx_path):
"""Check ONNX model if it is legit.
Args:
onnx_path (str): ONNX model path
"""
# Load the ONNX model
model = onnx.load(onnx_path)
# Check that the IR is well formed
onnx.checker.check_model(model)
# Print a human readable representation of the graph
onnx.helper.printable_graph(model.graph)
def init_ort_session(self, onnx_path):
"""Initialize ONNX Runtime session
Args:
onnx_path (str): ONNX model path
"""
# Setup options for optimization
sess_options = ort.SessionOptions()
sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
self.ort_session = ort.InferenceSession(onnx_path,
sess_options=sess_options)
def inference_ort(self, image, points):
"""Inference with ONNX Runtime session
Args:
image (np.array): processed image array
points (np.array): processed points array
Returns:
np.array: probability array from model output
"""
outputs = self.ort_session.run(None,
{'image': image.astype(np.float32),
'points': points.astype(np.int32)})
return outputs
class ImageHelper:
def __init__(self, image, input_size) -> None:
"""Initialize image helper for processing image.
Args:
image ([str, np.array]): input image path or array
input_size (tuple): input size of model
"""
self.input_image = image
if isinstance(image, str):
self.input_image = self._load_image(image)
self.orig_size = self.input_image.shape[:2]
self.input_size = input_size
# 'Resize' is TRUE if input_size is not equal to image size (orig_size).
self.resize = True if self.input_size != self.orig_size else False
self._image_nd = self._preprocessing(self.input_image)
@property
def image_nd(self):
return self._image_nd
@property
def input_shape(self):
return self.input_image.shape
def _load_image(self, img_path):
if not os.path.isfile(img_path):
raise FileNotFoundError(f"{img_path}")
image = cv2.imread(img_path, -1)
hei, wid, channel = image.shape
self.orig_size = (hei, wid)
assert channel == 3, "Channel of input image is not 3."
image = cv2.cvtColor(image,
cv2.COLOR_BGR2RGB)
return image
@staticmethod
def _np_resize_image(image, size, dtype='int'):
"""Resize image for np.array
NOTE:
* Resize results from cv2 and skimage differ slightly, so skimage is used as a workaround when resizing float-typed images.
Args:
image (np.array): image array
size (tuple): input size of model
dtype (str, optional): data type of image. Defaults to 'int'.
Raises:
NotImplementedError: dtype is allowed 'int' or 'float' only.
Returns:
np.array: resized image
"""
if dtype == 'int':
_size = (size[1], size[0]) # (H,W) to (W,H)
return cv2.resize(image.astype('uint8'),
_size,
interpolation=cv2.INTER_LINEAR)
elif dtype == 'float':
return skresize(image,
size,
order=0,
mode='constant',
preserve_range=True)
else:
raise NotImplementedError(f"'{dtype}' is not a valid dtype.")
@staticmethod
def _np_transpose(image):
"""Transpose array dimension from (H,W,C) to (C,H,W)
Args:
image (np.array): image array
Returns:
np.array: resized image array
"""
return np.transpose(image, (2, 0, 1))
@staticmethod
def _np_normalize(image):
"""Normalize image array
Args:
image (np.array): image array
Returns:
np.array: normalized image array
"""
_image = image / 255.0
_mean = np.array([[[.485]], [[.456]], [[.406]]]) # shape: (3, 1, 1)
_std = np.array([[[.229]], [[.224]], [[.225]]])
_image = (_image - _mean) / _std
return _image
@staticmethod
def _np_flip_n_cat(image):
"""Horizontal flipping and concatenation for model input
Args:
image (np.array): image array
Returns:
np.array: result array
"""
image_flip = np.flip(image, (2)) # flip the channel 2: width
_image = np.expand_dims(image, axis=0)
image_flip = np.expand_dims(image_flip, axis=0)
return np.concatenate((_image, image_flip), axis=0)
def _preprocessing(self, input_image):
"""Preprocess image for model input
Args:
input_image (np.array): input image
Returns:
np.array: preprocessed image
"""
if self.resize:
|
image = self._np_transpose(input_image)
image = self._np_normalize(image)
image = self._np_flip_n_cat(image)
return image
@staticmethod
def _np_sigmoid(prediction):
"""Sigmoid function for activation
NOTE:
* [WARN] Numerical stability is not handled.
Args:
prediction (np.array): predicted output
Returns:
np.array: probability map after activation
"""
x = prediction
prob_map = 1 / (1 + np.exp(-x))
return prob_map
@staticmethod
def _np_merge_prediction(prediction):
"""Merge two layers output into one.
Args:
prediction (np.array): probability map with 2 layers
Returns:
np.array: single layer probability map
"""
prob_map = prediction[0][0]
prob_map_flipped = np.flip(prediction[1][0], (1)) # (H, W)
_prob = 0.5 * (prob_map + prob_map_flipped)
return _prob
@staticmethod
def _np_get_mask(prob_map, prob_thresh=0.5):
"""Binarize probability map into mask.
Args:
prob_map (np.array): probability map which range is [0, 1].
prob_thresh (float, optional): probability threshold. Defaults to 0.5.
Returns:
np.array: mask which range is [0, 255].
"""
mask = (prob_map > prob_thresh) * 255
return mask.astype(np.uint8)
def postprocessing(self, prediction, prob_thresh=0.5):
"""Post-process for model output
Args:
prediction (np.array): predicted result from model output
prob_thresh (float, optional): probability threshold. Defaults to 0.5.
Returns:
np.array: mask
"""
prob_map = self._np_sigmoid(prediction)
prob_map = self._np_merge_prediction(prob_map)
if self.resize :
prob_map = self._np_resize_image(prob_map,
self.orig_size,
dtype='float')
mask = self._np_get_mask(prob_map, prob_thresh=prob_thresh)
return mask
class PointsHelper:
def __init__(self,
click_list,
image_width,
input_size,
orig_size,
resize=False,
net_clicks_limit=20) -> None:
"""Initialize points helper for processing user clicks.
Args:
click_list (list): a list of list with shape (n, 3)
image_width (int): image width
input_size (tuple): (height, width)
orig_size (tuple): (height, width)
resize (bool, optional): flag to resize. Defaults to False.
net_clicks_limit (int, optional): limitation to the number of clicks. Defaults to 20.
"""
self.click_list = click_list
self.image_width = image_width
self.net_clicks_limit = net_clicks_limit
self.input_size = input_size
| input_image = self._np_resize_image(input_image,
self.input_size,
dtype='int') | conditional_block |
table.go | services service.List) (PanelInfo, error)
// InsertData inserts data
InsertData(dataList form.Values) error
// UpdateData updates data
UpdateData(dataList form.Values) error
// DeleteData deletes data
DeleteData(pk string) error
}
// DefaultBaseTable builds the default BaseTable (which also implements the Table interface)
func DefaultBaseTable(cfgs ...ConfigTable) Table {
var cfg ConfigTable
if len(cfgs) > 0 && cfgs[0].PrimaryKey.Name != "" {
cfg = cfgs[0]
} else {
cfg = DefaultConfig()
}
return &BaseTable{
Informatoin: types.DefaultInformationPanel(cfg.PrimaryKey.Name),
Form: types.DefaultFormPanel(),
CanAdd: cfg.CanAdd,
EditAble: cfg.EditAble,
DeleteAble: cfg.DeleteAble,
PrimaryKey: cfg.PrimaryKey,
connectionDriver: cfg.Driver,
}
}
// ----- all Table methods of BaseTable ----- start
// GetPrimaryKey returns the primary key
func (base *BaseTable) GetPrimaryKey() PrimaryKey {
return base.PrimaryKey
}
// GetInfo sets the primary key and returns the InformationPanel
func (base *BaseTable) GetInfo() *types.InformationPanel {
return base.Informatoin.SetPrimaryKey(base.PrimaryKey.Name, base.PrimaryKey.Type)
}
// GetFormPanel sets the primary key and returns the FormPanel
func (base *BaseTable) GetFormPanel() *types.FormPanel {
return base.Form.SetPrimaryKey(base.PrimaryKey.Name, base.PrimaryKey.Type)
}
// GetNewForm processes and sets the form field details (the form fields for adding new data)
func (base *BaseTable) GetNewForm(services service.List) FormInfo {
return FormInfo{FieldList: base.Form.SetAllowAddValueOfField(services, base.GetSQLByService),
Title: base.Form.Title,
Description: base.Form.Description}
}
// GetDataWithID fetches the record by id and sets its values and defaults into BaseTable.Form.FormFields
func (base *BaseTable) GetDataWithID(param parameter.Parameters, services service.List) (FormInfo, error) {
var (
// FindPK 取得__pk的值(單個) | res map[string]interface{}
args = []interface{}{id}
fields, joins, groupBy = "", "", ""
tableName = base.GetFormPanel().Table
pk = tableName + "." + base.PrimaryKey.Name
queryStatement = "select %s from %s" + " %s where " + pk + " = ? %s "
)
// all columns
columns, _ := base.getColumns(base.Form.Table, services)
for _, field := range base.Form.FieldList {
if field.Field != pk && utils.InArray(columns, field.Field) && !field.Joins.Valid() {
fields += tableName + "." + field.Field + ","
}
}
fields += pk
queryCmd := fmt.Sprintf(queryStatement, fields, tableName, joins, groupBy)
result, err := base.getConnectionByService(services).Query(queryCmd, args...)
if err != nil {
return FormInfo{Title: base.Form.Title, Description: base.Form.Description}, err
}
if len(result) == 0 {
return FormInfo{Title: base.Form.Title, Description: base.Form.Description}, errors.New("錯誤的id")
}
res = result[0]
var fieldList = base.Form.FieldWithValue(base.PrimaryKey.Name,
id, columns, res, services, base.GetSQLByService)
return FormInfo{
FieldList: fieldList,
Title: base.Form.Title,
Description: base.Form.Description,
}, nil
}
// GetData fetches the data the page needs to display from the database and returns each row, the field info, the filterable field info, etc.
func (base *BaseTable) GetData(params parameter.Parameters, services service.List) (PanelInfo, error) {
return base.getDataFromDatabase(params, services)
}
// InsertData inserts data
func (base *BaseTable) InsertData(dataList form.Values) error {
if base.Form.InsertFunc != nil {
err := base.Form.InsertFunc(dataList)
if err != nil {
return err
}
}
return nil
}
// UpdateData updates data
func (base *BaseTable) UpdateData(dataList form.Values) error {
if base.Form.UpdateFunc != nil {
err := base.Form.UpdateFunc(dataList)
if err != nil {
return err
}
}
return nil
}
// DeleteData deletes data
func (base *BaseTable) DeleteData(id string) error {
idArr := strings.Split(id, ",")
if base.Informatoin.DeleteFunc != nil {
err := base.Informatoin.DeleteFunc(idArr)
return err
}
return nil
}
// ----- all Table methods of BaseTable ----- end
// GetSQLByService sets the Connection and CRUD of the db.SQL struct
func (base *BaseTable) GetSQLByService(services service.List) *db.SQL {
if base.connectionDriver != "" {
return db.SetConnectionAndCRUD(db.ConvertServiceToConnection(services.Get(base.connectionDriver)))
}
return nil
}
// getConnectionByService returns the Connection (interface)
func (base *BaseTable) getConnectionByService(services service.List) db.Connection {
if base.connectionDriver != "" {
return db.ConvertServiceToConnection(services.Get(base.connectionDriver))
}
return nil
}
// getColumns returns all columns of the table
func (base *BaseTable) getColumns(table string, services service.List) ([]string, bool) {
var auto bool
columnsModel, _ := base.GetSQLByService(services).Table(table).ShowColumns()
columns := make([]string, len(columnsModel))
for key, model := range columnsModel {
columns[key] = model["Field"].(string)
if columns[key] == base.PrimaryKey.Name { // 如果為主鍵
if v, ok := model["Extra"].(string); ok {
if v == "auto_increment" {
auto = true
}
}
}
}
return columns, auto
}
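// Illustrative note (an assumption, not taken from the original source): ShowColumns
// is expected to return one map per column in the style of MySQL's SHOW COLUMNS, e.g.
// map[string]interface{}{"Field": "id", "Type": "int(11)", "Extra": "auto_increment"},
// which is why the auto-increment check above only inspects the "Extra" key of the
// primary-key column.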
// getDataFromDatabase fetches the data the page needs to display from the database and returns each row, the field info, the filterable field info, etc.
func (base *BaseTable) getDataFromDatabase(params parameter.Parameters, services service.List) (PanelInfo, error) {
var (
connection = base.getConnectionByService(services)
ids = params.FindPKs() // FindPKs 取得__pk的值(多個)
countStatement string
queryStatement string
primaryKey = base.Informatoin.Table + "." + base.PrimaryKey.Name // 主鍵
wheres = ""
args = make([]interface{}, 0)
whereArgs = make([]interface{}, 0)
existKeys = make([]string, 0)
size int
)
if len(ids) > 0 {
queryStatement = "select %s from %s%s where " + primaryKey + " in (%s) %s ORDER BY %s.%s %s"
countStatement = "select count(*) from %s %s where " + primaryKey + " in (%s)"
} else {
queryStatement = "select %s from %s%s %s %s order by %s.%s %s LIMIT ? OFFSET ?"
countStatement = "select count(*) from %s %s %s"
}
// get all columns
columns, _ := base.getColumns(base.Informatoin.Table, services)
// getFieldInformationAndJoinOrderAndFilterForm returns the field info, the join clause and joined tables, and the filterable field info
fieldList, fields, joinFields, joins, joinTables, filterForm := base.getFieldInformationAndJoinOrderAndFilterForm(params, columns)
// append the primary key
fields += primaryKey
// all fields
allFields := fields
if joinFields != "" {
// append the fields joined from other tables (ex: group_concat(roles.`name` separator 'CkN694kH') as roles_join_name,)
allFields += "," + joinFields[:len(joinFields)-1]
}
if len(ids) > 0 {
for _, value := range ids {
if value != "" {
wheres += "?,"
args = append(args, value)
}
}
wheres = wheres[:len(wheres)-1]
} else {
// WhereStatement builds the where clause for the filters
wheres, whereArgs, existKeys = params.WhereStatement(wheres, base.Informatoin.Table, whereArgs, columns, existKeys)
wheres, whereArgs = base.Informatoin.Wheres.WhereStatement(wheres, whereArgs, existKeys, columns)
if wheres != "" {
wheres = " where " + wheres
}
if connection.Name() == "mysql" {
pageSizeInt, _ := strconv.Atoi(params.PageSize)
pageInt, _ := strconv.Atoi(params.Page)
args = append(whereArgs, pageSizeInt, (pageInt-1)*(pageSizeInt))
}
}
group | id = param.FindPK() | random_line_split |
table.go | )
countStatement string
queryStatement string
primaryKey = base.Informatoin.Table + "." + base.PrimaryKey.Name // 主鍵
wheres = ""
args = make([]interface{}, 0)
whereArgs = make([]interface{}, 0)
existKeys = make([]string, 0)
size int
)
if len(ids) > 0 {
queryStatement = "select %s from %s%s where " + primaryKey + " in (%s) %s ORDER BY %s.%s %s"
countStatement = "select count(*) from %s %s where " + primaryKey + " in (%s)"
} else {
queryStatement = "select %s from %s%s %s %s order by %s.%s %s LIMIT ? OFFSET ?"
countStatement = "select count(*) from %s %s %s"
}
// get all columns
columns, _ := base.getColumns(base.Informatoin.Table, services)
// getFieldInformationAndJoinOrderAndFilterForm returns the field info, the join clause and joined tables, and the filterable field info
fieldList, fields, joinFields, joins, joinTables, filterForm := base.getFieldInformationAndJoinOrderAndFilterForm(params, columns)
// append the primary key
fields += primaryKey
// all fields
allFields := fields
if joinFields != "" {
// append the fields joined from other tables (ex: group_concat(roles.`name` separator 'CkN694kH') as roles_join_name,)
allFields += "," + joinFields[:len(joinFields)-1]
}
if len(ids) > 0 {
for _, value := range ids {
if value != "" {
wheres += "?,"
args = append(args, value)
}
}
wheres = wheres[:len(wheres)-1]
} else {
// WhereStatement builds the where clause for the filters
wheres, whereArgs, existKeys = params.WhereStatement(wheres, base.Informatoin.Table, whereArgs, columns, existKeys)
wheres, whereArgs = base.Informatoin.Wheres.WhereStatement(wheres, whereArgs, existKeys, columns)
if wheres != "" {
wheres = " where " + wheres
}
if connection.Name() == "mysql" {
pageSizeInt, _ := strconv.Atoi(params.PageSize)
pageInt, _ := strconv.Atoi(params.Page)
args = append(whereArgs, pageSizeInt, (pageInt-1)*(pageSizeInt))
}
}
groupBy := ""
if len(joinTables) > 0 {
if connection.Name() == "mysql" {
groupBy = " GROUP BY " + primaryKey
}
}
// the sql statement
queryCmd := fmt.Sprintf(queryStatement, allFields, base.Informatoin.Table, joins, wheres, groupBy,
base.Informatoin.Table, params.SortField, params.SortType)
res, err := connection.Query(queryCmd, args...)
if err != nil {
return PanelInfo{}, err
}
// all rows displayed on the page
infoList := make([]map[string]types.InfoItem, 0)
for i := 0; i < len(res); i++ {
infoList = append(infoList, base.getTemplateDataModel(res[i], params, columns))
}
// count the total number of rows
if len(ids) == 0 {
countCmd := fmt.Sprintf(countStatement, base.Informatoin.Table, joins, wheres)
total, err := connection.Query(countCmd, whereArgs...)
if err != nil {
return PanelInfo{}, err
}
if base.connectionDriver == "mysql" {
size = int(total[0]["count(*)"].(int64))
}
}
// set Paginator.Option (mark the currently selected page size with "select")
paginator := paginator.GetPaginatorInformation(size, params)
paginator.PageSizeList = base.Informatoin.GetPageSizeList()
paginator.Option = make(map[string]template.HTML, len(paginator.PageSizeList))
for i := 0; i < len(paginator.PageSizeList); i++ {
paginator.Option[paginator.PageSizeList[i]] = template.HTML("")
}
paginator.Option[params.PageSize] = template.HTML("select")
return PanelInfo{
InfoList: infoList,
FieldList: fieldList,
Paginator: paginator,
PrimaryKey: base.PrimaryKey.Name,
Title: base.Informatoin.Title,
FilterFormData: filterForm,
Description: base.Informatoin.Description,
}, nil
}
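// Illustrative example (assumed table and field names, not from the original source):
// for a table "users" with primary key "id", no filters, no joins and descending sort,
// queryCmd expands to roughly
//   select users.`name`,users.id from users   order by users.id desc LIMIT ? OFFSET ?
// and countCmd to
//   select count(*) from users
// with the page size and offset supplied through args.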
// getTemplateDataModel builds and processes each row of data for the template
func (base *BaseTable) getTemplateDataModel(res map[string]interface{}, params parameter.Parameters, columns []string) map[string]types.InfoItem {
var templateDataModel = make(map[string]types.InfoItem)
headField := ""
// get the id
primaryKeyValue := db.GetValueFromDatabaseType(base.PrimaryKey.Type, res[base.PrimaryKey.Name])
for _, field := range base.Informatoin.FieldList {
headField = field.Field
// if the field joins another table
if field.Joins.Valid() {
// ex: roles_join_name
headField = field.Joins.Last().JoinTable + "_join_" + field.Field
}
if field.Hide {
continue
}
if !utils.InArrayWithoutEmpty(params.Columns, headField) {
continue
}
typeName := field.TypeName
if field.Joins.Valid() {
typeName = db.Varchar
}
// the value of each field
combineValue := db.GetValueFromDatabaseType(typeName, res[headField]).String()
var value interface{}
if len(columns) == 0 || utils.InArray(columns, headField) || field.Joins.Valid() {
value = field.FieldDisplay.DisplayFunc(types.FieldModel{
ID: primaryKeyValue.String(),
Value: combineValue,
Row: res,
})
} else {
value = field.FieldDisplay.DisplayFunc(types.FieldModel{
ID: primaryKeyValue.String(),
Value: "",
Row: res,
})
}
if valueStr, ok := value.(string); ok {
templateDataModel[headField] = types.InfoItem{
Content: template.HTML(valueStr),
Value: combineValue,
}
} else {
// executed for the roles field
templateDataModel[headField] = types.InfoItem{
Content: value.(template.HTML),
Value: combineValue,
}
}
}
// add the id (primary key) field info whether or not the id column is displayed
primaryKeyField := base.Informatoin.FieldList.GetFieldByFieldName(base.PrimaryKey.Name)
value := primaryKeyField.FieldDisplay.DisplayFunc(types.FieldModel{
ID: primaryKeyValue.String(),
Value: primaryKeyValue.String(),
Row: res,
})
if valueStr, ok := value.(string); ok {
templateDataModel[base.PrimaryKey.Name] = types.InfoItem{
Content: template.HTML(valueStr),
Value: primaryKeyValue.String(),
}
} else {
// executed for the roles field
templateDataModel[base.PrimaryKey.Name] = types.InfoItem{
Content: value.(template.HTML),
Value: primaryKeyValue.String(),
}
}
return templateDataModel
}
// getFieldInformationAndJoinOrderAndFilterForm returns the field info, the join clause and joined tables, and the filterable field info
func (base *BaseTable) getFieldInformationAndJoinOrderAndFilterForm(params parameter.Parameters, columns []string) (types.FieldList,
string, string, string, []string, []types.FormField) {
return base.Informatoin.FieldList.GetFieldInformationAndJoinOrderAndFilterForm(types.TableInfo{
Table: base.Informatoin.Table,
Delimiter: base.getDelimiter(),
Driver: base.connectionDriver,
PrimaryKey: base.PrimaryKey.Name,
}, params, columns)
}
// getDelimiter returns the quote delimiter
func (base *BaseTable) getDelimiter() string {
if base.connectionDriver == "mysql" {
return "'"
}
return ""
}
// SetServices sets services, which is a package-level global variable
func SetServices(srv service.List) {
lock.Lock()
defer lock.Unlock()
if atomic.LoadUint32(&count) != 0 {
panic("can not initialize twice")
}
services = srv
}
// Combine adds the page and form info into the List
func (g List) Combine(list List) List {
for key, gen := range list {
if _, ok := g[key]; !ok {
g[key] = gen
}
}
return g
}
// CombineAll adds all page and form info into the List
func (g List) CombineAll(gens []List) List {
for _, list := range gens {
for key, gen := range list {
if _, ok := g[key]; !ok {
g[key] = gen
}
}
}
return g
}
| identifier_body |
||
table.go | services service.List) (PanelInfo, error)
// InsertData inserts data
InsertData(dataList form.Values) error
// UpdateData updates data
UpdateData(dataList form.Values) error
// DeleteData deletes data
DeleteData(pk string) error
}
// DefaultBaseTable builds the default BaseTable (which also implements the Table interface)
func DefaultBaseTable(cfgs ...ConfigTable) Table {
var cfg ConfigTable
if len(cfgs) > 0 && cfgs[0].PrimaryKey.Name != "" {
cfg = cfgs[0]
} else {
cfg = DefaultConfig()
}
return &BaseTable{
Informatoin: types.DefaultInformationPanel(cfg.PrimaryKey.Name),
Form: types.DefaultFormPanel(),
CanAdd: cfg.CanAdd,
EditAble: cfg.EditAble,
DeleteAble: cfg.DeleteAble,
PrimaryKey: cfg.PrimaryKey,
connectionDriver: cfg.Driver,
}
}
// ----- all Table methods of BaseTable ----- start
// GetPrimaryKey returns the primary key
func (base *BaseTable) GetPrimaryKey() PrimaryKey {
return base.PrimaryKey
}
// GetInfo sets the primary key and returns the InformationPanel
func (base *BaseTable) GetInfo() *types.InformationPanel {
return base.Informatoin.SetPrimaryKey(base.PrimaryKey.Name, base.PrimaryKey.Type)
}
// GetFormPanel sets the primary key and returns the FormPanel
func (base *BaseTable) GetFormPanel() *types.FormPanel {
return base.Form.SetPrimaryKey(base.PrimaryKey.Name, base.PrimaryKey.Type)
}
// GetNewForm processes and sets the form field details (the form fields for adding new data)
func (base *BaseTable) GetNewForm(services service.List) FormInfo {
return FormInfo{FieldList: base.Form.SetAllowAddValueOfField(services, base.GetSQLByService),
Title: base.Form.Title,
Description: base.Form.Description}
}
// GetDataWithID fetches the record by id and sets its values and defaults into BaseTable.Form.FormFields
func (base *BaseTable) GetDataWithID(param parameter.Parameters, services service.List) (FormInfo, error) {
var (
// FindPK gets the __pk value (a single id)
id = param.FindPK()
res map[string]interface{}
args = []interface{}{id}
fields, joins, groupBy = "", "", ""
tableName = base.GetFormPanel().Table
pk = tableName + "." + base.PrimaryKey.Name
queryStatement = "select %s from %s" + " %s where " + pk + " = ? %s "
)
// all columns
columns, _ := base.getColumns(base.Form.Table, services)
for _, field := range base.Form.FieldList {
if field.Field != pk && utils.InArray(columns, field.Field) && !field.Joins.Valid() {
fields += tableName + "." + field.Field + ","
}
}
fields += pk
queryCmd := fmt.Sprintf(queryStatement, fields, tableName, joins, groupBy)
result, err := base.getConnectionByService(services).Query(queryCmd, args...)
if err != nil {
return FormInfo{Title: base.Form.Title, Description: base.Form.Description}, err
}
if len(result) == 0 {
return FormInfo{Title: base.Form.Title, Description: base.Form.Description}, errors.New("錯誤的id")
}
res = result[0]
var fieldList = base.Form.FieldWithValue(base.PrimaryKey.Name,
id, columns, res, services, base.GetSQLByService)
return FormInfo{
FieldList: fieldList,
Title: base.Form.Title,
Description: base.Form.Description,
}, nil
}
// GetData fetches the data the page needs to display from the database and returns each row, the field info, the filterable field info, etc.
func (base *BaseTable) GetData(params parameter.Parameters, services service.List) (PanelInfo, error) {
return base.getDataFromDatabase(params, services)
}
// InsertData inserts data
func (base *BaseTable) InsertData(dataList form.Values) error {
if base.Form.InsertFunc != nil {
err := base.Form.InsertFunc(dataList)
if err != nil {
return err
}
}
return nil
}
// UpdateData updates data
func (base *BaseTable) UpdateData(dataList form.Values) error {
if base.Form.UpdateFunc != nil {
err := base.Form.UpdateFunc(dataList)
if err != nil {
return err
}
}
return nil
}
// DeleteData deletes data
func (base *BaseTable) DeleteData(id string) error {
id | strings.Split(id, ",")
if base.Informatoin.DeleteFunc != nil {
err := base.Informatoin.DeleteFunc(idArr)
return err
}
return nil
}
// ----- all Table methods of BaseTable ----- end
// GetSQLByService sets the Connection and CRUD of the db.SQL struct
func (base *BaseTable) GetSQLByService(services service.List) *db.SQL {
if base.connectionDriver != "" {
return db.SetConnectionAndCRUD(db.ConvertServiceToConnection(services.Get(base.connectionDriver)))
}
return nil
}
// getConnectionByService returns the Connection (interface)
func (base *BaseTable) getConnectionByService(services service.List) db.Connection {
if base.connectionDriver != "" {
return db.ConvertServiceToConnection(services.Get(base.connectionDriver))
}
return nil
}
// getColumns returns all columns of the table
func (base *BaseTable) getColumns(table string, services service.List) ([]string, bool) {
var auto bool
columnsModel, _ := base.GetSQLByService(services).Table(table).ShowColumns()
columns := make([]string, len(columnsModel))
for key, model := range columnsModel {
columns[key] = model["Field"].(string)
if columns[key] == base.PrimaryKey.Name { // 如果為主鍵
if v, ok := model["Extra"].(string); ok {
if v == "auto_increment" {
auto = true
}
}
}
}
return columns, auto
}
// getDataFromDatabase fetches the data the page needs to display from the database and returns each row, the field info, the filterable field info, etc.
func (base *BaseTable) getDataFromDatabase(params parameter.Parameters, services service.List) (PanelInfo, error) {
var (
connection = base.getConnectionByService(services)
ids = params.FindPKs() // FindPKs 取得__pk的值(多個)
countStatement string
queryStatement string
primaryKey = base.Informatoin.Table + "." + base.PrimaryKey.Name // 主鍵
wheres = ""
args = make([]interface{}, 0)
whereArgs = make([]interface{}, 0)
existKeys = make([]string, 0)
size int
)
if len(ids) > 0 {
queryStatement = "select %s from %s%s where " + primaryKey + " in (%s) %s ORDER BY %s.%s %s"
countStatement = "select count(*) from %s %s where " + primaryKey + " in (%s)"
} else {
queryStatement = "select %s from %s%s %s %s order by %s.%s %s LIMIT ? OFFSET ?"
countStatement = "select count(*) from %s %s %s"
}
// get all columns
columns, _ := base.getColumns(base.Informatoin.Table, services)
// getFieldInformationAndJoinOrderAndFilterForm returns the field info, the join clause and joined tables, and the filterable field info
fieldList, fields, joinFields, joins, joinTables, filterForm := base.getFieldInformationAndJoinOrderAndFilterForm(params, columns)
// append the primary key
fields += primaryKey
// all fields
allFields := fields
if joinFields != "" {
// 加上join其他表的欄位(ex: group_concat(roles.`name` separator 'CkN694kH') as roles_join_name,)
allFields += "," + joinFields[:len(joinFields)-1]
}
if len(ids) > 0 {
for _, value := range ids {
if value != "" {
wheres += "?,"
args = append(args, value)
}
}
wheres = wheres[:len(wheres)-1]
} else {
// WhereStatement builds the where clause for the filters
wheres, whereArgs, existKeys = params.WhereStatement(wheres, base.Informatoin.Table, whereArgs, columns, existKeys)
wheres, whereArgs = base.Informatoin.Wheres.WhereStatement(wheres, whereArgs, existKeys, columns)
if wheres != "" {
wheres = " where " + wheres
}
if connection.Name() == "mysql" {
pageSizeInt, _ := strconv.Atoi(params.PageSize)
pageInt, _ := strconv.Atoi(params.Page)
args = append(whereArgs, pageSizeInt, (pageInt-1)*(pageSizeInt))
}
}
| Arr := | identifier_name |
table.go | )
countStatement string
queryStatement string
primaryKey = base.Informatoin.Table + "." + base.PrimaryKey.Name // 主鍵
wheres = ""
args = make([]interface{}, 0)
whereArgs = make([]interface{}, 0)
existKeys = make([]string, 0)
size int
)
if len(ids) > 0 {
queryStatement = "select %s from %s%s where " + primaryKey + " in (%s) %s ORDER BY %s.%s %s"
countStatement = "select count(*) from %s %s where " + primaryKey + " in (%s)"
} else {
queryStatement = "select %s from %s%s %s %s order by %s.%s %s LIMIT ? OFFSET ?"
countStatement = "select count(*) from %s %s %s"
}
// get all columns
columns, _ := base.getColumns(base.Informatoin.Table, services)
// getFieldInformationAndJoinOrderAndFilterForm returns the field info, the join clause and joined tables, and the filterable field info
fieldList, fields, joinFields, joins, joinTables, filterForm := base.getFieldInformationAndJoinOrderAndFilterForm(params, columns)
// append the primary key
fields += primaryKey
// all fields
allFields := fields
if joinFields != "" {
// append the fields joined from other tables (ex: group_concat(roles.`name` separator 'CkN694kH') as roles_join_name,)
allFields += "," + joinFields[:len(joinFields)-1]
}
if len(ids) > 0 {
for _, value := range ids {
if value != "" {
wheres += "?,"
args = append(args, value)
}
}
wheres = wheres[:len(wheres)-1]
} else {
// WhereStatement builds the where clause for the filters
wheres, whereArgs, existKeys = params.WhereStatement(wheres, base.Informatoin.Table, whereArgs, columns, existKeys)
wheres, whereArgs = base.Informatoin.Wheres.WhereStatement(wheres, whereArgs, existKeys, columns)
if wheres != "" {
wheres = " where " + wheres
}
if connection.Name() == "mysql" {
pageSizeInt, _ := strconv.Atoi(params.PageSize)
pageInt, _ := strconv.Atoi(params.Page)
args = append(whereArgs, pageSizeInt, (pageInt-1)*(pageSizeInt))
}
}
groupBy := ""
if len(joinTables) > 0 {
if connection.Name() == "mysql" {
groupBy = " GROUP BY " + primaryKey
}
}
// the sql statement
queryCmd := fmt.Sprintf(queryStatement, allFields, base.Informatoin.Table, joins, wheres, groupBy,
base.Informatoin.Table, params.SortField, params.SortType)
res, err := connection.Query(queryCmd, args...)
if err != nil {
return PanelInfo{}, err
}
// all rows displayed on the page
infoList := make([]map[string]types.InfoItem, 0)
for i := 0; i < len(res); i++ {
infoList = append(infoList, base.getTemplateDataModel(res[i], params, columns))
}
// count the total number of rows
if len(ids) == 0 {
countCmd := fmt.Sprintf(countStatement, base.Informatoin.Table, joins, wheres)
total, err := connection.Query(countCmd, whereArgs...)
if err != nil {
return PanelInfo{}, err
}
if base.connectionDriver == "mysql" {
size = int(total[0]["count(*)"].(int64))
}
}
// set Paginator.Option (mark the currently selected page size with "select")
paginator := paginator.GetPaginatorInformation(size, params)
paginator.PageSizeList = base.Informatoin.GetPageSizeList()
paginator.Option = make(map[string]template.HTML, len(paginator.PageSizeList))
for i := 0; i < len(paginator.PageSizeList); i++ {
paginator.Option[paginator.PageSizeList[i]] = template.HTML("")
}
paginator.Option[params.PageSize] = template.HTML("select")
return PanelInfo{
InfoList: infoList,
FieldList: fieldList,
Paginator: paginator,
PrimaryKey: base.PrimaryKey.Name,
Title: base.Informatoin.Title,
FilterFormData: filterForm,
Description: base.Informatoin.Description,
}, nil
}
// getTemplateDataModel builds and processes each row of data for the template
func (base *BaseTable) getTemplateDataModel(res map[string]interface{}, params parameter.Parameters, columns []string) map[string]types.InfoItem {
var templateDataModel = make(map[string]types.InfoItem)
headField := ""
// get the id
primaryKeyValue := db.GetValueFromDatabaseType(base.PrimaryKey.Type, res[base.PrimaryKey.Name])
for _, field := range base.Informatoin.FieldList {
headField = field.Field
// if the field joins another table
if field.Joins.Valid() {
// ex: roles_join_name
headField = field.Joins.Last().JoinTable + "_join_" + field.Field
}
if field.Hide {
continue
}
if !utils.InArrayWithoutEmpty(params.Columns, headField) {
continue
}
typeName := field.TypeName
if field.Joins.Valid() {
typeName = db.Varchar
}
// the value of each field
combineValue := db.GetValueFromDatabaseType(typeName, res[headField]).String()
var value interface{}
if len(columns) == 0 || utils.InArray(columns, headField) || field.Joins.Valid() {
value = field.FieldDisplay.DisplayFunc(types.FieldModel{
ID: primaryKeyValue.String(),
Value: combineValue,
Row: res,
})
} else {
value = field.FieldDisplay.DisplayFunc(types.FieldModel{
ID: primaryKeyValue.String(),
Value: "",
Row: res,
})
}
if valueStr, ok := value.(string); ok {
templateDataModel[headField] = types.InfoItem{
Content: template.HTML(valueStr),
Value: combineValue,
}
} else {
// executed for the roles field
templateDataModel[headField] = types.InfoItem{
Content: value.(template.HTML),
Value: combineValue,
}
}
}
// add the id (primary key) field info whether or not the id column is displayed
primaryKeyField := base.Informatoin.FieldList.GetFieldByFieldName(base.PrimaryKey.Name)
value := primaryKeyField.FieldDisplay.DisplayFunc(types.FieldModel{
ID: primaryKeyValue.String(),
Value: primaryKeyValue.String(),
Row: res,
})
if valueStr, ok := value.(string); ok {
templateDataModel[base.PrimaryKey.Name] = types.InfoItem{
Content: template.HTML(valueStr),
Value: primaryKeyValue.String(),
}
} else {
// executed for the roles field
templateDataModel[base.PrimaryKey.Name] = types.InfoItem{
Content: value.(template.HTML),
Value: primaryKeyValue.String(),
}
}
return templateDataModel
}
// getFieldInformationAndJoinOrderAndFilterForm returns the field info, the join clause and joined tables, and the filterable field info
func (base *BaseTable) getFieldInformationAndJoinOrderAndFilterForm(params parameter.Parameters, columns []string) (types.FieldList,
string, string, string, []string, []types.FormField) {
return base.Informatoin.FieldList.GetFieldInformationAndJoinOrderAndFilterForm(types.TableInfo{
Table: base.Informatoin.Table,
Delimiter: base.getDelimiter(),
Driver: base.connectionDriver,
PrimaryKey: base.PrimaryKey.Name,
}, params, columns)
}
// getDelimiter returns the quote delimiter
func (base *BaseTable) getDelimiter() string {
if base.connectionDriver == "mysql" {
return "'"
}
return ""
}
// SetServices sets services, which is a package-level global variable
func SetServices(srv service.List) {
lock.Lock()
defer lock.Unlock()
if atomic.LoadUint32(&count) != 0 {
panic("can not initialize twice")
}
services = srv
}
// Combine adds the page and form info into the List
func (g List) Combine(list List) List {
for key, gen := range list {
if _, ok := g[key]; !ok {
g[key] = gen
}
}
return g
}
// CombineAll adds all page and form info into the List
func (g List) CombineAll(gens []List) List {
for _, list := range gens {
for key, gen := range list {
if _, ok := g[key]; !ok {
g[key] = gen
}
}
}
return g
}
| conditional_block |
||
session.rs | let timestamp = time.timestamp() as i64;
let nano = time.nanosecond() as i64;
((timestamp << 32) | (nano & 0x_7fff_fffc))
}
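// Packs the unix timestamp into the high 32 bits and the sub-second nanoseconds into
// the low bits; the 0x_7fff_fffc mask clears the two lowest bits so the resulting id
// is divisible by 4, which (per the usual MTProto convention) marks it as a
// client-generated message id.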
#[derive(Debug, Clone)]
pub struct AppId {
pub api_id: i32,
pub api_hash: String,
}
#[derive(Debug, Clone)]
struct Salt {
valid_since: DateTime<Utc>,
valid_until: DateTime<Utc>,
salt: i64,
}
impl From<mtproto::FutureSalt> for Salt {
fn from(fs: mtproto::FutureSalt) -> Self {
Salt {
valid_since: Utc.timestamp(*fs.valid_since() as i64, 0),
valid_until: Utc.timestamp(*fs.valid_until() as i64, 0),
salt: *fs.salt(),
}
}
}
#[derive(Debug, Clone)]
pub struct Session {
session_id: i64,
temp_session_id: Option<i64>,
server_salts: Vec<Salt>,
seq_no: i32,
auth_key: Option<AuthKey>,
to_ack: Vec<i64>,
app_id: AppId,
}
#[derive(Debug, Default)]
pub struct PlainPayload {
dummy: (),
}
#[derive(Debug, Default)]
pub struct EncryptedPayload {
session_id: Option<i64>,
}
pub struct MessageBuilder<P> {
message_id: i64,
payload: mtproto::TLObject,
payload_opts: P,
}
pub type EitherMessageBuilder = MessageBuilder<Either<PlainPayload, EncryptedPayload>>;
impl<PO: Default> MessageBuilder<PO> {
fn with_message_id<P>(message_id: i64, payload: P) -> Self
where P: AnyBoxedSerialize,
{
let payload = mtproto::TLObject::new(payload).into();
MessageBuilder {
message_id, payload,
payload_opts: Default::default(),
}
}
pub fn new<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
Self::with_message_id(next_message_id(), payload)
}
}
impl<PO> MessageBuilder<PO> {
pub fn message_id(&self) -> i64 {
self.message_id
}
pub fn constructor(&self) -> ConstructorNumber {
self.payload.serialize_boxed().0
}
fn seq_no_from<SNF>(&self, seq_no_func: SNF) -> i32
where SNF: FnOnce(bool) -> i32
{
seq_no_func(is_content_message(self.constructor()))
}
pub fn into_basic_message<SNF>(self, seq_no_func: SNF) -> mtproto::manual::basic_message::BasicMessage
where SNF: FnOnce(bool) -> i32,
{
mtproto::manual::basic_message::BasicMessage {
msg_id: self.message_id,
seqno: self.seq_no_from(seq_no_func),
body: self.payload.into(),
}
}
}
impl MessageBuilder<PlainPayload> {
pub fn into_outbound_raw(self) -> OutboundRaw {
OutboundRaw {
auth_key_id: 0,
message_id: self.message_id,
payload: self.payload.into(),
}
}
pub fn lift(self) -> EitherMessageBuilder {
MessageBuilder {
message_id: self.message_id,
payload: self.payload,
payload_opts: Either::Left(self.payload_opts),
}
}
}
impl MessageBuilder<EncryptedPayload> {
pub fn with_session_id(mut self, session_id: i64) -> Self {
self.payload_opts.session_id = Some(session_id);
self
}
pub fn into_outbound_encrypted<SNF>(self, salt: i64, session_id: i64, seq_no_func: SNF) -> OutboundEncrypted
where SNF: FnOnce(bool) -> i32,
{
OutboundEncrypted {
salt,
session_id: self.payload_opts.session_id.unwrap_or(session_id),
message_id: self.message_id,
seq_no: self.seq_no_from(seq_no_func),
payload: self.payload.into(),
}
}
pub fn lift(self) -> EitherMessageBuilder {
MessageBuilder {
message_id: self.message_id,
payload: self.payload,
payload_opts: Either::Right(self.payload_opts),
}
}
}
impl EitherMessageBuilder {
pub fn plain<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
MessageBuilder::<PlainPayload>::new(payload).lift()
}
pub fn encrypted<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
MessageBuilder::<EncryptedPayload>::new(payload).lift()
}
pub fn separate(self) -> Either<MessageBuilder<PlainPayload>, MessageBuilder<EncryptedPayload>> {
let MessageBuilder { message_id, payload, payload_opts: e } = self;
match e {
Either::Left(payload_opts) => Either::Left(MessageBuilder { message_id, payload, payload_opts }),
Either::Right(payload_opts) => Either::Right(MessageBuilder { message_id, payload, payload_opts }),
}
}
}
fn is_content_message(n: ConstructorNumber) -> bool {
// XXX: there has to be a better way
match n {
ConstructorNumber(0x62d6b459) |
ConstructorNumber(0x73f1f8dc) => false,
_ => true,
}
}
#[derive(Debug, Clone)]
pub struct InboundMessage {
pub message_id: i64,
pub payload: Vec<u8>,
pub was_encrypted: bool,
pub seq_no: Option<i32>,
}
impl Session {
pub fn new(app_id: AppId) -> Session {
Session {
app_id,
session_id: CSRNG.gen(),
temp_session_id: None,
server_salts: vec![],
seq_no: 0,
auth_key: None,
to_ack: vec![],
}
}
fn | (&mut self) -> i32 {
let ret = self.seq_no | 1;
self.seq_no += 2;
ret
}
fn next_seq_no(&mut self, content_message: bool) -> i32 {
if content_message {
self.next_content_seq_no()
} else {
self.seq_no
}
}
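// Illustrative sequence (not from the original source): starting from seq_no == 0,
// three content-related messages receive seq_nos 1, 3 and 5 (next_content_seq_no
// advances self.seq_no by 2 each time), while service messages sent in between
// report the current even value of self.seq_no without advancing it.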
fn latest_server_salt(&mut self) -> Result<i64> {
let time = {
let last_salt = match self.server_salts.last() {
Some(s) => s,
None => Err(SessionFailure::NoSalts)?,
};
// Make sure at least one salt is retained.
cmp::min(Utc::now(), last_salt.valid_until.clone())
};
self.server_salts.retain(|s| &s.valid_until >= &time);
Ok(self.server_salts.first().unwrap().salt)
}
pub fn add_server_salts<I>(&mut self, salts: I)
where I: IntoIterator<Item = mtproto::FutureSalt>,
{
self.server_salts.extend(salts.into_iter().map(Into::into));
self.server_salts.sort_by(|a, b| a.valid_since.cmp(&b.valid_since));
}
pub fn adopt_key(&mut self, authorization_key: AuthKey) {
self.auth_key = Some(authorization_key);
}
pub fn ack_id(&mut self, id: i64) {
self.to_ack.push(id);
}
fn pack_message_container<PO, I>(&mut self, payloads: I) -> mtproto::manual::MessageContainer
where I: IntoIterator<Item = MessageBuilder<PO>>,
{
let messages: Vec<_> = payloads.into_iter()
.map(|m| m.into_basic_message(|c| self.next_seq_no(c)))
.collect();
mtproto::manual::msg_container::MsgContainer {
messages: messages.into(),
}.into_boxed()
}
fn fresh_auth_key(&self) -> Result<AuthKey> {
match self.auth_key {
Some(ref key) => Ok(key.clone()),
None => Err(SessionFailure::NoAuthKey.into()),
}
}
fn pack_payload_with_acks<PO: Default>(&mut self, payload: MessageBuilder<PO>) -> MessageBuilder<PO> {
if self.to_ack.is_empty() {
return payload;
};
let acks = MessageBuilder::new(mtproto::msgs_ack::MsgsAck {
msg_ids: mem::replace(&mut self.to_ack, vec![]).into(),
}.into_boxed());
MessageBuilder::new(self.pack_message_container(vec![payload, acks]))
}
pub fn serialize_plain_message(&mut self, message: MessageBuilder<PlainPayload>) -> Result<Vec<u8>> {
Ok(message.into_outbound_raw().bare_serialized_bytes()?)
}
pub fn serialize_encrypted_message(&mut self, message: MessageBuilder<EncryptedPayload>) -> Result<Vec<u8>> {
let key = self.fresh_auth_key()?;
let message = self.pack_payload_with_acks(message)
.into_outbound_encrypted(
self.latest_server_salt()?, self.session_id,
|c| self.next_seq_no(c));
Ok(key.encrypt_message(message)?)
}
pub fn serialize_message(&mut self, message: EitherMessageBuilder) -> Result<Vec | next_content_seq_no | identifier_name |
session.rs | 4;
((timestamp << 32) | (nano & 0x_7fff_fffc))
}
#[derive(Debug, Clone)]
pub struct AppId {
pub api_id: i32,
pub api_hash: String,
}
#[derive(Debug, Clone)]
struct Salt {
valid_since: DateTime<Utc>,
valid_until: DateTime<Utc>,
salt: i64,
}
impl From<mtproto::FutureSalt> for Salt {
fn from(fs: mtproto::FutureSalt) -> Self {
Salt {
valid_since: Utc.timestamp(*fs.valid_since() as i64, 0),
valid_until: Utc.timestamp(*fs.valid_until() as i64, 0),
salt: *fs.salt(),
}
}
}
#[derive(Debug, Clone)]
pub struct Session {
session_id: i64,
temp_session_id: Option<i64>,
server_salts: Vec<Salt>,
seq_no: i32,
auth_key: Option<AuthKey>,
to_ack: Vec<i64>,
app_id: AppId,
}
#[derive(Debug, Default)]
pub struct PlainPayload {
dummy: (),
}
#[derive(Debug, Default)]
pub struct EncryptedPayload {
session_id: Option<i64>,
}
pub struct MessageBuilder<P> {
message_id: i64,
payload: mtproto::TLObject,
payload_opts: P,
}
pub type EitherMessageBuilder = MessageBuilder<Either<PlainPayload, EncryptedPayload>>;
impl<PO: Default> MessageBuilder<PO> {
fn with_message_id<P>(message_id: i64, payload: P) -> Self
where P: AnyBoxedSerialize,
{
let payload = mtproto::TLObject::new(payload).into();
MessageBuilder {
message_id, payload,
payload_opts: Default::default(),
}
}
pub fn new<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
Self::with_message_id(next_message_id(), payload)
}
}
impl<PO> MessageBuilder<PO> {
pub fn message_id(&self) -> i64 {
self.message_id
}
pub fn constructor(&self) -> ConstructorNumber {
self.payload.serialize_boxed().0
}
fn seq_no_from<SNF>(&self, seq_no_func: SNF) -> i32
where SNF: FnOnce(bool) -> i32
{
seq_no_func(is_content_message(self.constructor()))
}
pub fn into_basic_message<SNF>(self, seq_no_func: SNF) -> mtproto::manual::basic_message::BasicMessage
where SNF: FnOnce(bool) -> i32,
{
mtproto::manual::basic_message::BasicMessage {
msg_id: self.message_id,
seqno: self.seq_no_from(seq_no_func),
body: self.payload.into(),
}
}
}
impl MessageBuilder<PlainPayload> {
pub fn into_outbound_raw(self) -> OutboundRaw {
OutboundRaw {
auth_key_id: 0,
message_id: self.message_id,
payload: self.payload.into(),
}
}
pub fn lift(self) -> EitherMessageBuilder {
MessageBuilder {
message_id: self.message_id,
payload: self.payload,
payload_opts: Either::Left(self.payload_opts),
}
}
}
impl MessageBuilder<EncryptedPayload> {
pub fn with_session_id(mut self, session_id: i64) -> Self {
self.payload_opts.session_id = Some(session_id);
self
}
pub fn into_outbound_encrypted<SNF>(self, salt: i64, session_id: i64, seq_no_func: SNF) -> OutboundEncrypted
where SNF: FnOnce(bool) -> i32,
{
OutboundEncrypted {
salt,
session_id: self.payload_opts.session_id.unwrap_or(session_id),
message_id: self.message_id,
seq_no: self.seq_no_from(seq_no_func),
payload: self.payload.into(),
}
}
pub fn lift(self) -> EitherMessageBuilder {
MessageBuilder {
message_id: self.message_id,
payload: self.payload,
payload_opts: Either::Right(self.payload_opts),
}
}
}
impl EitherMessageBuilder {
pub fn plain<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
MessageBuilder::<PlainPayload>::new(payload).lift()
}
pub fn encrypted<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
MessageBuilder::<EncryptedPayload>::new(payload).lift()
}
pub fn separate(self) -> Either<MessageBuilder<PlainPayload>, MessageBuilder<EncryptedPayload>> {
let MessageBuilder { message_id, payload, payload_opts: e } = self;
match e {
Either::Left(payload_opts) => Either::Left(MessageBuilder { message_id, payload, payload_opts }),
Either::Right(payload_opts) => Either::Right(MessageBuilder { message_id, payload, payload_opts }),
}
}
}
fn is_content_message(n: ConstructorNumber) -> bool {
// XXX: there has to be a better way
match n {
ConstructorNumber(0x62d6b459) |
ConstructorNumber(0x73f1f8dc) => false,
_ => true,
}
}
#[derive(Debug, Clone)]
pub struct InboundMessage {
pub message_id: i64,
pub payload: Vec<u8>,
pub was_encrypted: bool,
pub seq_no: Option<i32>,
}
impl Session {
pub fn new(app_id: AppId) -> Session {
Session {
app_id,
session_id: CSRNG.gen(),
temp_session_id: None,
server_salts: vec![],
seq_no: 0,
auth_key: None,
to_ack: vec![],
}
}
fn next_content_seq_no(&mut self) -> i32 {
let ret = self.seq_no | 1;
self.seq_no += 2;
ret
}
fn next_seq_no(&mut self, content_message: bool) -> i32 {
if content_message {
self.next_content_seq_no()
} else {
self.seq_no
}
}
fn latest_server_salt(&mut self) -> Result<i64> {
let time = {
let last_salt = match self.server_salts.last() {
Some(s) => s,
None => Err(SessionFailure::NoSalts)?,
};
// Make sure at least one salt is retained.
cmp::min(Utc::now(), last_salt.valid_until.clone())
};
self.server_salts.retain(|s| &s.valid_until >= &time);
Ok(self.server_salts.first().unwrap().salt)
}
pub fn add_server_salts<I>(&mut self, salts: I)
where I: IntoIterator<Item = mtproto::FutureSalt>,
{
self.server_salts.extend(salts.into_iter().map(Into::into));
self.server_salts.sort_by(|a, b| a.valid_since.cmp(&b.valid_since));
}
pub fn adopt_key(&mut self, authorization_key: AuthKey) {
self.auth_key = Some(authorization_key);
}
pub fn ack_id(&mut self, id: i64) {
self.to_ack.push(id);
}
fn pack_message_container<PO, I>(&mut self, payloads: I) -> mtproto::manual::MessageContainer
where I: IntoIterator<Item = MessageBuilder<PO>>,
{
let messages: Vec<_> = payloads.into_iter()
.map(|m| m.into_basic_message(|c| self.next_seq_no(c)))
.collect();
mtproto::manual::msg_container::MsgContainer {
messages: messages.into(),
}.into_boxed()
}
fn fresh_auth_key(&self) -> Result<AuthKey> {
match self.auth_key {
Some(ref key) => Ok(key.clone()),
None => Err(SessionFailure::NoAuthKey.into()),
}
}
fn pack_payload_with_acks<PO: Default>(&mut self, payload: MessageBuilder<PO>) -> MessageBuilder<PO> {
if self.to_ack.is_empty() {
return payload;
};
let acks = MessageBuilder::new(mtproto::msgs_ack::MsgsAck {
msg_ids: mem::replace(&mut self.to_ack, vec![]).into(),
}.into_boxed());
MessageBuilder::new(self.pack_message_container(vec![payload, acks]))
}
pub fn serialize_plain_message(&mut self, message: MessageBuilder<PlainPayload>) -> Result<Vec<u8>> {
Ok(message.into_outbound_raw().bare_serialized_bytes()?)
}
pub fn serialize_encrypted_message(&mut self, message: MessageBuilder<EncryptedPayload>) -> Result<Vec<u8>> {
let key = self.fresh_auth_key()?;
let message = self.pack_payload_with_acks(message)
.into_outbound_encrypted(
self.latest_server_salt()?, self.session_id,
|c| self.next_seq_no(c));
Ok(key.encrypt_message(message)?)
}
pub fn serialize_message(&mut self, message: EitherMessageBuilder) -> Result<Vec<u8>> {
match message.separate() { | Either::Left(m) => self.serialize_plain_message(m), | random_line_split |
|
typechecking.rs | _table_handle.borrow();
self.global_env.populate_from_symbols(symbol_table);
let output = self.global_env.infer_block(&input.0)?;
Ok(format!("{:?}", output))
}
}
impl TypeEnv {
fn instantiate(&mut self, sigma: Scheme) -> Type {
match sigma {
Scheme { ty, .. } => ty,
}
}
fn generate(&mut self, ty: Type) -> Scheme {
Scheme {
names: vec![], //TODO incomplete
ty
}
}
fn infer_block(&mut self, block: &Vec<ast::Statement>) -> TypeResult<Type> {
let mut output = Type::Const(TConst::Unit);
for statement in block {
output = self.infer_statement(statement)?;
}
Ok(output)
}
fn infer_statement(&mut self, statement: &ast::Statement) -> TypeResult<Type> {
match statement {
ast::Statement::ExpressionStatement(expr) => self.infer_expr(expr),
ast::Statement::Declaration(decl) => self.infer_decl(decl)
}
}
fn infer_decl(&mut self, decl: &ast::Declaration) -> TypeResult<Type> {
use ast::Declaration::*;
match decl {
Binding { name, expr, .. } => {
let ty = self.infer_expr(expr)?;
let sigma = self.generate(ty);
self.0.insert(name.clone(), sigma);
},
_ => (),
}
Ok(Type::Const(TConst::Unit))
}
fn infer_expr(&mut self, expr: &ast::Expression) -> TypeResult<Type> {
match expr {
ast::Expression(expr, Some(anno)) => {
self.infer_exprtype(expr)
},
ast::Expression(expr, None) => {
self.infer_exprtype(expr)
}
}
}
fn infer_exprtype(&mut self, expr: &ast::ExpressionType) -> TypeResult<Type> {
use self::TConst::*;
use ast::ExpressionType::*;
Ok(match expr {
NatLiteral(_) => Type::Const(Nat),
StringLiteral(_) => Type::Const(StringT),
BinExp(op, lhs, rhs) => {
return Err(format!("NOTDONE"))
},
Call { f, arguments } => {
return Err(format!("NOTDONE"))
},
Value(name) => {
let s = match self.0.get(name) {
Some(sigma) => sigma.clone(),
None => return Err(format!("Unknown variable: {}", name))
};
self.instantiate(s)
},
_ => Type::Const(Unit)
})
}
}
/* GIANT TODO - use the rust im crate, unless I make this code way less haskell-ish after it's done
*/
/*
pub type TypeResult<T> = Result<T, String>;
*/
/* TODO this should just check the name against a map, and that map should be pre-populated with
* types */
/*
impl parsing::TypeName {
fn to_type(&self) -> TypeResult<Type> {
use self::parsing::TypeSingletonName;
use self::parsing::TypeName::*;
use self::Type::*; use self::TConstOld::*;
Ok(match self {
Tuple(_) => return Err(format!("Tuples not yet implemented")),
Singleton(name) => match name {
TypeSingletonName { name, .. } => match &name[..] {
/*
"Nat" => Const(Nat),
"Int" => Const(Int),
"Float" => Const(Float),
"Bool" => Const(Bool),
"String" => Const(StringT),
*/
n => Const(Custom(n.to_string()))
}
}
})
}
}
*/
/*
impl TypeContext {
pub fn type_check_ast(&mut self, ast: &parsing::AST) -> TypeResult<String> {
let ref block = ast.0;
let mut infer = Infer::default();
let env = TypeEnvironment::default();
let output = infer.infer_block(block, &env);
match output {
Ok(s) => Ok(format!("{:?}", s)),
Err(s) => Err(format!("Error: {:?}", s))
}
}
}
// this is the equivalent of the Haskell Infer monad
#[derive(Debug, Default)]
struct Infer {
_idents: u32,
}
#[derive(Debug)]
enum InferError {
CannotUnify(MonoType, MonoType),
OccursCheckFailed(Rc<String>, MonoType),
UnknownIdentifier(Rc<String>),
Custom(String),
}
type InferResult<T> = Result<T, InferError>;
impl Infer {
fn fresh(&mut self) -> MonoType {
let i = self._idents;
self._idents += 1;
let name = Rc::new(format!("{}", ('a' as u8 + 1) as char));
MonoType::Var(name)
}
fn unify(&mut self, a: MonoType, b: MonoType) -> InferResult<Substitution> {
use self::InferError::*; use self::MonoType::*;
Ok(match (a, b) {
(Const(ref a), Const(ref b)) if a == b => Substitution::new(),
(Var(ref name), ref var) => Substitution::bind_variable(name, var),
(ref var, Var(ref name)) => Substitution::bind_variable(name, var),
(Function(box a1, box b1), Function(box a2, box b2)) => {
let s1 = self.unify(a1, a2)?;
let s2 = self.unify(b1.apply_substitution(&s1), b2.apply_substitution(&s1))?;
s1.merge(s2)
},
(a, b) => return Err(CannotUnify(a, b))
})
}
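// Worked example (illustrative): unifying Function(Var("a"), Const(Nat)) with
// Function(Const(Bool), Var("b")) first binds a := Bool, then applies that
// substitution to both result types and binds b := Nat, so the merged
// substitution is { a := Bool, b := Nat }.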
fn infer_block(&mut self, block: &Vec<parsing::Statement>, env: &TypeEnvironment) -> InferResult<MonoType> {
use self::parsing::Statement;
let mut ret = MonoType::Const(TypeConst::Unit);
for statement in block.iter() {
ret = match statement {
Statement::ExpressionStatement(expr) => {
let (sub, ty) = self.infer_expr(expr, env)?;
//TODO handle substitution monadically
ty
}
Statement::Declaration(decl) => MonoType::Const(TypeConst::Unit),
}
}
Ok(ret)
}
fn infer_expr(&mut self, expr: &parsing::Expression, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::Expression;
match expr {
Expression(e, Some(anno)) => self.infer_annotated_expr(e, anno, env),
/*
let anno_ty = anno.to_type()?;
let ty = self.infer_exprtype(&e)?;
self.unify(ty, anno_ty)
},
*/
Expression(e, None) => self.infer_exprtype(e, env)
}
}
fn infer_annotated_expr(&mut self, expr: &parsing::ExpressionType, anno: &parsing::TypeName, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
Err(InferError::Custom(format!("exprtype not done: {:?}", expr)))
}
fn infer_exprtype(&mut self, expr: &parsing::ExpressionType, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::ExpressionType::*;
use self::TypeConst::*;
Ok(match expr {
NatLiteral(_) => (Substitution::new(), MonoType::Const(Nat)),
FloatLiteral(_) => (Substitution::new(), MonoType::Const(Float)),
StringLiteral(_) => (Substitution::new(), MonoType::Const(StringT)),
BoolLiteral(_) => (Substitution::new(), MonoType::Const(Bool)),
Value(name) => match env.lookup(name) {
Some(sigma) => {
let tau = self.instantiate(&sigma);
(Substitution::new(), tau)
},
None => return Err(InferError::UnknownIdentifier(name.clone())),
},
e => return Err(InferError::Custom(format!("Type inference for {:?} not done", e)))
})
}
fn instantiate(&mut self, sigma: &PolyType) -> MonoType {
let ref ty: MonoType = sigma.1;
let mut subst = Substitution::new();
for name in sigma.0.iter() {
let fresh_mvar = self.fresh();
let new = Substitution::bind_variable(name, &fresh_mvar);
subst = subst.merge(new);
}
ty.apply_substitution(&subst)
}
}
*/
/* OLD STUFF DOWN HERE */
/*
impl TypeContext {
fn infer_block(&mut self, statements: &Vec<parsing::Statement>) -> TypeResult<Type> {
let mut ret_type = Type::Const(TConst::Unit);
for statement in statements {
ret_type = self.infer_statement(statement)?;
}
Ok(ret_type)
}
fn infer_statement(&mut self, statement: &parsing::Statement) -> TypeResult<Type> {
use self::parsing::Statement::*;
match statement {
ExpressionStatement(expr) => self.infer(expr),
Declaration(decl) => self.add_declaration(decl),
}
}
fn add_declaration(&mut self, decl: &parsing::Declaration) -> TypeResult<Type> { | use self::parsing::Declaration::*;
use self::Type::*;
match decl { | random_line_split |
|
typechecking.rs | TypeName, Type>,
symbol_table_handle: Rc<RefCell<SymbolTable>>,
global_env: TypeEnv
}
impl<'a> TypeContext<'a> {
pub fn new(symbol_table_handle: Rc<RefCell<SymbolTable>>) -> TypeContext<'static> {
TypeContext { values: ScopeStack::new(None), global_env: TypeEnv::default(), symbol_table_handle }
}
pub fn debug_types(&self) -> String {
let mut output = format!("Type environment\n");
for (name, scheme) in &self.global_env.0 {
write!(output, "{} -> {}\n", name, scheme).unwrap();
}
output
}
pub fn type_check_ast(&mut self, input: &ast::AST) -> Result<String, String> {
let ref symbol_table = self.symbol_table_handle.borrow();
self.global_env.populate_from_symbols(symbol_table);
let output = self.global_env.infer_block(&input.0)?;
Ok(format!("{:?}", output))
}
}
impl TypeEnv {
fn instantiate(&mut self, sigma: Scheme) -> Type {
match sigma {
Scheme { ty, .. } => ty,
}
}
fn generate(&mut self, ty: Type) -> Scheme {
Scheme {
names: vec![], //TODO incomplete
ty
}
}
fn infer_block(&mut self, block: &Vec<ast::Statement>) -> TypeResult<Type> {
let mut output = Type::Const(TConst::Unit);
for statement in block {
output = self.infer_statement(statement)?;
}
Ok(output)
}
fn infer_statement(&mut self, statement: &ast::Statement) -> TypeResult<Type> {
match statement {
ast::Statement::ExpressionStatement(expr) => self.infer_expr(expr),
ast::Statement::Declaration(decl) => self.infer_decl(decl)
}
}
fn infer_decl(&mut self, decl: &ast::Declaration) -> TypeResult<Type> {
use ast::Declaration::*;
match decl {
Binding { name, expr, .. } => {
let ty = self.infer_expr(expr)?;
let sigma = self.generate(ty);
self.0.insert(name.clone(), sigma);
},
_ => (),
}
Ok(Type::Const(TConst::Unit))
}
fn infer_expr(&mut self, expr: &ast::Expression) -> TypeResult<Type> {
match expr {
ast::Expression(expr, Some(anno)) => {
self.infer_exprtype(expr)
},
ast::Expression(expr, None) => {
self.infer_exprtype(expr)
}
}
}
fn infer_exprtype(&mut self, expr: &ast::ExpressionType) -> TypeResult<Type> {
use self::TConst::*;
use ast::ExpressionType::*;
Ok(match expr {
NatLiteral(_) => Type::Const(Nat),
StringLiteral(_) => Type::Const(StringT),
BinExp(op, lhs, rhs) => {
return Err(format!("NOTDONE"))
},
Call { f, arguments } => {
| Value(name) => {
let s = match self.0.get(name) {
Some(sigma) => sigma.clone(),
None => return Err(format!("Unknown variable: {}", name))
};
self.instantiate(s)
},
_ => Type::Const(Unit)
})
}
}
/* GIANT TODO - use the rust im crate, unless I make this code way less haskell-ish after it's done
*/
/*
pub type TypeResult<T> = Result<T, String>;
*/
/* TODO this should just check the name against a map, and that map should be pre-populated with
* types */
/*
impl parsing::TypeName {
fn to_type(&self) -> TypeResult<Type> {
use self::parsing::TypeSingletonName;
use self::parsing::TypeName::*;
use self::Type::*; use self::TConstOld::*;
Ok(match self {
Tuple(_) => return Err(format!("Tuples not yet implemented")),
Singleton(name) => match name {
TypeSingletonName { name, .. } => match &name[..] {
/*
"Nat" => Const(Nat),
"Int" => Const(Int),
"Float" => Const(Float),
"Bool" => Const(Bool),
"String" => Const(StringT),
*/
n => Const(Custom(n.to_string()))
}
}
})
}
}
*/
/*
impl TypeContext {
pub fn type_check_ast(&mut self, ast: &parsing::AST) -> TypeResult<String> {
let ref block = ast.0;
let mut infer = Infer::default();
let env = TypeEnvironment::default();
let output = infer.infer_block(block, &env);
match output {
Ok(s) => Ok(format!("{:?}", s)),
Err(s) => Err(format!("Error: {:?}", s))
}
}
}
// this is the equivalent of the Haskell Infer monad
#[derive(Debug, Default)]
struct Infer {
_idents: u32,
}
#[derive(Debug)]
enum InferError {
CannotUnify(MonoType, MonoType),
OccursCheckFailed(Rc<String>, MonoType),
UnknownIdentifier(Rc<String>),
Custom(String),
}
type InferResult<T> = Result<T, InferError>;
impl Infer {
fn fresh(&mut self) -> MonoType {
let i = self._idents;
self._idents += 1;
let name = Rc::new(format!("{}", ('a' as u8 + 1) as char));
MonoType::Var(name)
}
fn unify(&mut self, a: MonoType, b: MonoType) -> InferResult<Substitution> {
use self::InferError::*; use self::MonoType::*;
Ok(match (a, b) {
(Const(ref a), Const(ref b)) if a == b => Substitution::new(),
(Var(ref name), ref var) => Substitution::bind_variable(name, var),
(ref var, Var(ref name)) => Substitution::bind_variable(name, var),
(Function(box a1, box b1), Function(box a2, box b2)) => {
let s1 = self.unify(a1, a2)?;
let s2 = self.unify(b1.apply_substitution(&s1), b2.apply_substitution(&s1))?;
s1.merge(s2)
},
(a, b) => return Err(CannotUnify(a, b))
})
}
fn infer_block(&mut self, block: &Vec<parsing::Statement>, env: &TypeEnvironment) -> InferResult<MonoType> {
use self::parsing::Statement;
let mut ret = MonoType::Const(TypeConst::Unit);
for statement in block.iter() {
ret = match statement {
Statement::ExpressionStatement(expr) => {
let (sub, ty) = self.infer_expr(expr, env)?;
//TODO handle substitution monadically
ty
}
Statement::Declaration(decl) => MonoType::Const(TypeConst::Unit),
}
}
Ok(ret)
}
fn infer_expr(&mut self, expr: &parsing::Expression, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::Expression;
match expr {
Expression(e, Some(anno)) => self.infer_annotated_expr(e, anno, env),
/*
let anno_ty = anno.to_type()?;
let ty = self.infer_exprtype(&e)?;
self.unify(ty, anno_ty)
},
*/
Expression(e, None) => self.infer_exprtype(e, env)
}
}
fn infer_annotated_expr(&mut self, expr: &parsing::ExpressionType, anno: &parsing::TypeName, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
Err(InferError::Custom(format!("exprtype not done: {:?}", expr)))
}
fn infer_exprtype(&mut self, expr: &parsing::ExpressionType, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::ExpressionType::*;
use self::TypeConst::*;
Ok(match expr {
NatLiteral(_) => (Substitution::new(), MonoType::Const(Nat)),
FloatLiteral(_) => (Substitution::new(), MonoType::Const(Float)),
StringLiteral(_) => (Substitution::new(), MonoType::Const(StringT)),
BoolLiteral(_) => (Substitution::new(), MonoType::Const(Bool)),
Value(name) => match env.lookup(name) {
Some(sigma) => {
let tau = self.instantiate(&sigma);
(Substitution::new(), tau)
},
None => return Err(InferError::UnknownIdentifier(name.clone())),
},
e => return Err(InferError::Custom(format!("Type inference for {:?} not done", e)))
})
}
fn instantiate(&mut self, sigma: &PolyType) -> MonoType {
let ref ty: MonoType = sigma.1;
let mut subst = Substitution::new();
for name in sigma.0.iter() {
let fresh_mvar = self.fresh();
let new = Substitution::bind_variable(name, &fresh_mvar);
subst = subst.merge(new);
}
ty.apply_substitution(&subst)
}
}
*/
|
return Err(format!("NOTDONE"))
},
| conditional_block |
typechecking.rs | ashMap<TypeName, Type>);
impl Substitution {
fn empty() -> Substitution {
Substitution(HashMap::new())
}
}
#[derive(Debug, PartialEq, Clone)]
struct TypeEnv(HashMap<TypeName, Scheme>);
impl TypeEnv {
fn default() -> TypeEnv {
TypeEnv(HashMap::new())
}
fn populate_from_symbols(&mut self, symbol_table: &SymbolTable) {
for (name, symbol) in symbol_table.values.iter() {
if let SymbolSpec::Func(ref type_names) = symbol.spec {
let mut ch: char = 'a';
let mut names = vec![];
for _ in type_names.iter() {
names.push(Rc::new(format!("{}", ch)));
ch = ((ch as u8) + 1) as char;
}
let sigma = Scheme {
names: names.clone(),
ty: Type::Func(names.into_iter().map(|n| Type::Var(n)).collect())
};
self.0.insert(name.clone(), sigma);
}
}
}
}
pub struct TypeContext<'a> {
values: ScopeStack<'a, TypeName, Type>,
symbol_table_handle: Rc<RefCell<SymbolTable>>,
global_env: TypeEnv
}
impl<'a> TypeContext<'a> {
pub fn new(symbol_table_handle: Rc<RefCell<SymbolTable>>) -> TypeContext<'static> {
TypeContext { values: ScopeStack::new(None), global_env: TypeEnv::default(), symbol_table_handle }
}
pub fn debug_types(&self) -> String {
let mut output = format!("Type environment\n");
for (name, scheme) in &self.global_env.0 {
write!(output, "{} -> {}\n", name, scheme).unwrap();
}
output
}
pub fn type_check_ast(&mut self, input: &ast::AST) -> Result<String, String> {
let ref symbol_table = self.symbol_table_handle.borrow();
self.global_env.populate_from_symbols(symbol_table);
let output = self.global_env.infer_block(&input.0)?;
Ok(format!("{:?}", output))
}
}
impl TypeEnv {
fn instantiate(&mut self, sigma: Scheme) -> Type {
match sigma {
Scheme { ty, .. } => ty,
}
}
fn generate(&mut self, ty: Type) -> Scheme {
Scheme {
names: vec![], //TODO incomplete
ty
}
}
fn infer_block(&mut self, block: &Vec<ast::Statement>) -> TypeResult<Type> {
let mut output = Type::Const(TConst::Unit);
for statement in block {
output = self.infer_statement(statement)?;
}
Ok(output)
}
fn infer_statement(&mut self, statement: &ast::Statement) -> TypeResult<Type> {
match statement {
ast::Statement::ExpressionStatement(expr) => self.infer_expr(expr),
ast::Statement::Declaration(decl) => self.infer_decl(decl)
}
}
fn infer_decl(&mut self, decl: &ast::Declaration) -> TypeResult<Type> {
use ast::Declaration::*;
match decl {
Binding { name, expr, .. } => {
let ty = self.infer_expr(expr)?;
let sigma = self.generate(ty);
self.0.insert(name.clone(), sigma);
},
_ => (),
}
Ok(Type::Const(TConst::Unit))
}
fn infer_expr(&mut self, expr: &ast::Expression) -> TypeResult<Type> {
match expr {
ast::Expression(expr, Some(anno)) => {
self.infer_exprtype(expr)
},
ast::Expression(expr, None) => {
self.infer_exprtype(expr)
}
}
}
fn infer_exprtype(&mut self, expr: &ast::ExpressionType) -> TypeResult<Type> {
use self::TConst::*;
use ast::ExpressionType::*;
Ok(match expr {
NatLiteral(_) => Type::Const(Nat),
StringLiteral(_) => Type::Const(StringT),
BinExp(op, lhs, rhs) => {
return Err(format!("NOTDONE"))
},
Call { f, arguments } => {
return Err(format!("NOTDONE"))
},
Value(name) => {
let s = match self.0.get(name) {
Some(sigma) => sigma.clone(),
None => return Err(format!("Unknown variable: {}", name))
};
self.instantiate(s)
},
_ => Type::Const(Unit)
})
}
}
/* GIANT TODO - use the rust im crate, unless I make this code way less haskell-ish after it's done
*/
/*
pub type TypeResult<T> = Result<T, String>;
*/
/* TODO this should just check the name against a map, and that map should be pre-populated with
* types */
/*
impl parsing::TypeName {
fn to_type(&self) -> TypeResult<Type> {
use self::parsing::TypeSingletonName;
use self::parsing::TypeName::*;
use self::Type::*; use self::TConstOld::*;
Ok(match self {
Tuple(_) => return Err(format!("Tuples not yet implemented")),
Singleton(name) => match name {
TypeSingletonName { name, .. } => match &name[..] {
/*
"Nat" => Const(Nat),
"Int" => Const(Int),
"Float" => Const(Float),
"Bool" => Const(Bool),
"String" => Const(StringT),
*/
n => Const(Custom(n.to_string()))
}
}
})
}
}
*/
/*
impl TypeContext {
pub fn type_check_ast(&mut self, ast: &parsing::AST) -> TypeResult<String> {
let ref block = ast.0;
let mut infer = Infer::default();
let env = TypeEnvironment::default();
let output = infer.infer_block(block, &env);
match output {
Ok(s) => Ok(format!("{:?}", s)),
Err(s) => Err(format!("Error: {:?}", s))
}
}
}
// this is the equivalent of the Haskell Infer monad
#[derive(Debug, Default)]
struct Infer {
_idents: u32,
}
#[derive(Debug)]
enum InferError {
CannotUnify(MonoType, MonoType),
OccursCheckFailed(Rc<String>, MonoType),
UnknownIdentifier(Rc<String>),
Custom(String),
}
type InferResult<T> = Result<T, InferError>;
impl Infer {
fn fresh(&mut self) -> MonoType {
let i = self._idents;
self._idents += 1;
let name = Rc::new(format!("{}", ('a' as u8 + i as u8) as char));
MonoType::Var(name)
}
fn unify(&mut self, a: MonoType, b: MonoType) -> InferResult<Substitution> {
use self::InferError::*; use self::MonoType::*;
Ok(match (a, b) {
(Const(ref a), Const(ref b)) if a == b => Substitution::new(),
(Var(ref name), ref var) => Substitution::bind_variable(name, var),
(ref var, Var(ref name)) => Substitution::bind_variable(name, var),
(Function(box a1, box b1), Function(box a2, box b2)) => {
let s1 = self.unify(a1, a2)?;
let s2 = self.unify(b1.apply_substitution(&s1), b2.apply_substitution(&s1))?;
s1.merge(s2)
},
(a, b) => return Err(CannotUnify(a, b))
})
}
fn infer_block(&mut self, block: &Vec<parsing::Statement>, env: &TypeEnvironment) -> InferResult<MonoType> {
use self::parsing::Statement;
let mut ret = MonoType::Const(TypeConst::Unit);
for statement in block.iter() {
ret = match statement {
Statement::ExpressionStatement(expr) => {
let (sub, ty) = self.infer_expr(expr, env)?;
//TODO handle substitution monadically
ty
}
Statement::Declaration(decl) => MonoType::Const(TypeConst::Unit),
}
}
Ok(ret)
}
fn infer_expr(&mut self, expr: &parsing::Expression, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::Expression;
match expr {
Expression(e, Some(anno)) => self.infer_annotated_expr(e, anno, env),
/*
let anno_ty = anno.to_type()?;
let ty = self.infer_exprtype(&e)?;
self.unify(ty, anno_ty)
},
*/
Expression(e, None) => self.infer_exprtype(e, env)
}
}
fn infer_annotated_expr(&mut self, expr: &parsing::ExpressionType, anno: &parsing::TypeName, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
Err(InferError::Custom(format!("exprtype not done: {:?}", expr)))
}
fn infer_exprtype(&mut self, expr: &parsing::ExpressionType, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::ExpressionType::*;
use self::TypeConst::*;
Ok(match expr {
NatLiteral(_) => (Sub | bstitution(H | identifier_name |
|
summary6.1.py | tally all Signal types that belong to active buy entries
buy_signal_types = t[t[u"类型"] == u"买入"].groupby('Signal')
# Tally all Signal types that belong to active short-sell entries
short_signal_types = t[t['类型'] == '卖空'].groupby('Signal')
buy_signals_name_list = buy_signal_types.size()._index
short_signals_name_list = short_signal_types.size()._index
signal_class_list = []
for signal in buy_signals_name_list:
signal_class_list.append(Signal(signal))
for signal in short_signals_name_list:
signal_class_list.append(Signal(signal))
return 'Initialization complete'
def start():
'''
Main routine
-----
'''
global frequency
def daily_frequency():
global deal_list, date_list
frequency, deal_list, date_list = dict(), list(), list()
for i in range(1, len(t)):
# Count the number of trades per day
date = t['Date/Time'][i].floor('D')
date_list.append(date)
frequency[date] = frequency.get(date, 0) + 1
# Append a Deal object for this trade record
if i%2==1:
this_row = t.loc[i]; next_row = t.loc[i+1]
deal_list.append(
Deal(index=this_row['#'], dealtype = this_row['类型'],
start=this_row['Date/Time'], end= next_row['Date/Time'],
start_price=this_row['价格'], end_price=next_row['价格'],
start_signal=this_row['Signal'], end_signal=next_row['Signal'],
volume=this_row['Shares/Ctrts/Units - Profit/Loss']))
print('Adding trade record objects...')
return frequency
t0 = time.time()
init_signal_class()
print('Initialization complete!')
print('Signal name', ' '*8, 'Profit(¥)\t\tVolume\tP/V')
for signal in signal_class_list:
signal.check()
print('[', signal.name, ']', ' '*(15-len(signal.name)), round(signal.sum_profit, 2),
'\t', signal.sum_volume, '\t', round(signal.sum_profit/signal.sum_volume, 3))
frequency = pd.Series(daily_frequency())
t1 = time.time()
tpy = t1-t0
print('Summation Finished Successfully in %5.3f seconds.' % tpy)
class Deal():
def __init__(self, index, dealtype, start, end, start_signal, end_signal, volume, start_price, end_price):
'''
Long ("买入") deals: self.type = 1; anything else is treated as short: self.type = -1
'''
self.index = index
if dealtype=='买入':self.type = 1
else: self.type = -1
self.start, self.end = start, end
self.start_signal = start_signal
self.end_signal = end_signal
self.volume = volume
self.start_price, self.end_price = start_price, end_price
self.profit = (end_price-start_price) * volume * self.type
self.floating = 0
self.duration = (self.end - self.start).days  # holding period in calendar days
self.confirmed = False
def is_valid(self, today):
return (today>=self.start) & (today<self.end)
def confirm_profit(self):
self.confirmed = True
def cal_floating(self, close):
self.floating = (close - self.start_price) * self.volume * self.type
return self.floating
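# Worked example (hypothetical numbers): a short Deal opened at 100 and closed at 90 with
# volume 2 gives profit = (90 - 100) * 2 * (-1) = 20, i.e. shorts gain when the price falls;
# cal_floating applies the same sign convention to the latest close.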
#%%
def 每日浮动收益统计(symbol:str, Type:str):
from data_reader import get_index_day, get_stock_day, get_index_future_day, get_comm_future_day
start_date = str(date_list[0])[:10]
end_date = str(date_list[-1])[:10]
if Type=='stock':
close = get_stock_day(symbol, start_date, end_date, freq = '1D')
elif Type=='index':
close = get_index_day(symbol, start_date, end_date, freq = '1D')
elif Type=='index_future':
close = get_index_future_day(symbol, start_date, end_date, freq = '1D')
elif Type=='comm_future':
close = get_comm_future_day(symbol, start_date, end_date, freq = '1D')
else:
print('Type is NOT acceptable, please check your input.')
quit()
close.index = close.index + pd.Timedelta(15, unit='h')
record = []
截止当日的累计盈利, comfirmed_profit = 0, 0
for day in close.index:
float_profit = 0
for deal in deal_list:
if deal.is_valid(day):
float_profit += deal.cal_floating(close.loc[day]['sclose'])
deal.floating_profit=deal.cal_floating(close.loc[day]['sclose'])
elif day.date()==deal.end.date():
# If the deal closes today, confirm (realize) its profit today
deal.confirm_profit()
comfirmed_profit+=deal.profit
#deal_list.remove(deal)
截止当日的累计盈利 = comfirmed_profit + float_profit
#print(day, int(float_profit), int(comfirmed_profit), int(截止当日的累计盈利),sep='\t')
record.append((day, float_profit, comfirmed_profit, 截止当日的累计盈利))
ans=pd.DataFrame(record,columns=('date','floating_profit','comfirmed_profit','accumlated_profit'))
ans=ans.set_index('date')
if Type.endswith('future'):
choice = input("You are using futures;\nDo you want to multiply amount by 200?\nInput 1 for YES, 0 for NO: ")
if choice=='0': future_multiplier = 1
else: future_multiplier = 200
ans[['floating_profit','comfirmed_profit','accumlated_profit']]*=future_multiplier
ans['当日盈亏']=ans.accumlated_profit.diff()
return ans
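# Usage sketch (symbol and instrument type are assumed, not taken from the original script):
#   daily = 每日浮动收益统计('IF00', 'index_future')
# returns a date-indexed DataFrame with floating_profit, comfirmed_profit, accumlated_profit
# and 当日盈亏 (daily P/L) columns, optionally scaled by the futures multiplier.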
#%%
def output_to_excel():
from openpyxl.styles import Font, Border, numbers
from openpyxl.formatting.rule import DataBarRule
from openpyxl.drawing.image import Image
# %% Drawdown statistics | Returns: max drawdown ratio, start date, end date, total return, annualized return, annualized drawdown
'''
t['Capital'] = float(t['价格'][1])+t['Net Profit - Cum Net Profit']
yearly_drawdown = dict()
t['Year']=pd.Series(t['Date/Time'][i].year for i in range(len(t)))
t_group=t.groupby('Year')
year_groups=[t_group.get_group(i) for i in t_group.groups.keys()]
for year_group in year_groups:
max_draw_down, temp_max_value = 0, 0
start_date, end_date, current_start_date = 0, 0, 0
continous = False # whether the drawdown run is continuing
for i in year_group.index:
if year_group['#'][i]>0: continue
if temp_max_value < year_group['Capital'][i]:
current_start_date = year_group['Date/Time'][i]
temp_max_value = max(temp_max_value, year_group['Capital'][i])
continous = False
else:
if max_draw_down>year_group['Capital'][i]/temp_max_value-1:
if not continous:
continous = True
max_draw_down = year_group['Capital'][i]/temp_max_value-1
else:
if continous:
continous = False
start_date = current_start_date
end_date = year_group['Date/Time'][i]
yearly_drawdown[year_group['Year'][i]] = max_draw_down, start_date, end_date
yearly_return = dict() # record yearly returns
max_draw_down, temp_max_value = 0, 0
start_date, end_date, current_start_date = 0, 0, 0
continous = False # whether the drawdown run is continuing
for i in range(2, len(t),2): # even-numbered rows
if temp_max_value < t['Capital'][i-2]:
current_start_date = t['Date/Time'][i]
temp_max_value = max(temp_max_value, t['Capital'][i-2])
if max_draw_down>t['Capital'][i]/temp_max_value-1:
if not continous:
continous = True
max_draw_down = t['Capital'][i]/temp_max_value-1
else:
if continous:
continous = False
start_date = current_start_date
end_date = t['Date/Time'][i]
# Tally the yearly return
year = t['Date/Time'][i].year
yearly_return[year] = t['Net Profit - Cum Net Profit'][i]
total_return = t['Capital'][i]/t['Capital'][0]-1
yearly_return = pd.Series(yearly_return) / t['Capital'][0]
first_year = t['Date/Time'][1].year, yearly_return[t['Date/Time'][1].year]
yearly_return = yearly | def drawdown_by_time(t: pd.DataFrame):
'''
Computes: maximum drawdown ratio and cumulative return
Method: simple (non-compounded) interest | random_line_split
summary6.1.py | all Signal types that belong to active buy entries
buy_signal_types = t[t[u"类型"] == u"买入"].groupby('Signal')
# Tally all Signal types that belong to active short-sell entries
short_signal_types = t[t['类型'] == '卖空'].groupby('Signal')
buy_signals_name_list = buy_signal_types.size()._index
short_signals_name_list = short_signal_types.size()._index
signal_class_list = []
for signal in buy_signals_name_list:
signal_class_list.append(Signal(signal))
for signal in short_signals_name_list:
signal_class_list.append(Signal(signal))
return 'Initialization complete'
def start():
'''
Main routine
-----
'''
global frequency
def daily_frequency():
global deal_list, date_list
frequency, deal_list, date_list = dict(), list(), list()
for i in range(1, len(t)):
# Count the number of trades per day
date = t['Date/Time'][i].floor('D')
date_list.append(date)
frequency[date] = frequency.get(date, 0) + 1
# Append a Deal object for this trade record
if i%2==1:
this_row = t.loc[i]; next_row = t.loc[i+1]
deal_list.append(
Deal(index=this_row['#'], dealtype = this_row['类型'],
start=this_row['Date/Time | ow['Date/Time'],
start_price=this_row['价格'], end_price=next_row['价格'],
start_signal=this_row['Signal'], end_signal=next_row['Signal'],
volume=this_row['Shares/Ctrts/Units - Profit/Loss']))
print('Adding trade record objects...')
return frequency
t0 = time.time()
init_signal_class()
print('Initialization complete!')
print('Signal name', ' '*8, 'Profit(¥)\t\tVolume\tP/V')
for signal in signal_class_list:
signal.check()
print('[', signal.name, ']', ' '*(15-len(signal.name)), round(signal.sum_profit, 2),
'\t', signal.sum_volume, '\t', round(signal.sum_profit/signal.sum_volume, 3))
frequency = pd.Series(daily_frequency())
t1 = time.time()
tpy = t1-t0
print('Summation Finished Successfully in %5.3f seconds.' % tpy)
class Deal():
def __init__(self, index, dealtype, start, end, start_signal, end_signal, volume, start_price, end_price):
'''
Long ("买入") deals: self.type = 1; anything else is treated as short: self.type = -1
'''
self.index = index
if dealtype=='买入':self.type = 1
else: self.type = -1
self.start, self.end = start, end
self.start_signal = start_signal
self.end_signal = end_signal
self.volume = volume
self.start_price, self.end_price = start_price, end_price
self.profit = (end_price-start_price) * volume * self.type
self.floating = 0
self.duration = (self.end - self.start).days  # holding period in calendar days
self.confirmed = False
def is_valid(self, today):
return (today>=self.start) & (today<self.end)
def confirm_profit(self):
self.confirmed = True
def cal_floating(self, close):
self.floating = (close - self.start_price) * self.volume * self.type
return self.floating
#%%
def 每日浮动收益统计(symbol:str, Type:str):
from data_reader import get_index_day, get_stock_day, get_index_future_day, get_comm_future_day
start_date = str(date_list[0])[:10]
end_date = str(date_list[-1])[:10]
if Type=='stock':
close = get_stock_day(symbol, start_date, end_date, freq = '1D')
elif Type=='index':
close = get_index_day(symbol, start_date, end_date, freq = '1D')
elif Type=='index_future':
close = get_index_future_day(symbol, start_date, end_date, freq = '1D')
elif Type=='comm_future':
close = get_comm_future_day(symbol, start_date, end_date, freq = '1D')
else:
print('Type is NOT acceptable, please check your input.')
quit()
close.index = close.index + pd.Timedelta(15, unit='h')
record = []
截止当日的累计盈利, comfirmed_profit = 0, 0
for day in close.index:
float_profit = 0
for deal in deal_list:
if deal.is_valid(day):
float_profit += deal.cal_floating(close.loc[day]['sclose'])
deal.floating_profit=deal.cal_floating(close.loc[day]['sclose'])
elif day.date()==deal.end.date():
# If the deal closes today, confirm (realize) its profit today
deal.confirm_profit()
comfirmed_profit+=deal.profit
#deal_list.remove(deal)
截止当日的累计盈利 = comfirmed_profit + float_profit
#print(day, int(float_profit), int(comfirmed_profit), int(截止当日的累计盈利),sep='\t')
record.append((day, float_profit, comfirmed_profit, 截止当日的累计盈利))
ans=pd.DataFrame(record,columns=('date','floating_profit','comfirmed_profit','accumlated_profit'))
ans=ans.set_index('date')
if Type.endswith('future'):
choice = input("You are using futures;\nDo you want to multiply amount by 200?\nInput 1 for YES, 0 for NO: ")
if choice=='0': future_multiplier = 1
else: future_multiplier = 200
ans[['floating_profit','comfirmed_profit','accumlated_profit']]*=future_multiplier
ans['当日盈亏']=ans.accumlated_profit.diff()
return ans
#%%
def output_to_excel():
from openpyxl.styles import Font, Border, numbers
from openpyxl.formatting.rule import DataBarRule
from openpyxl.drawing.image import Image
# %% Drawdown statistics
def drawdown_by_time(t: pd.DataFrame):
'''
Computes: maximum drawdown ratio and cumulative return
Method: simple (non-compounded) interest
Returns: max drawdown ratio, start date, end date, total return, annualized return, annualized drawdown
'''
t['Capital'] = float(t['价格'][1])+t['Net Profit - Cum Net Profit']
yearly_drawdown = dict()
t['Year']=pd.Series(t['Date/Time'][i].year for i in range(len(t)))
t_group=t.groupby('Year')
year_groups=[t_group.get_group(i) for i in t_group.groups.keys()]
for year_group in year_groups:
max_draw_down, temp_max_value = 0, 0
start_date, end_date, current_start_date = 0, 0, 0
continous = False # whether the drawdown run is continuing
for i in year_group.index:
if year_group['#'][i]>0: continue
if temp_max_value < year_group['Capital'][i]:
current_start_date = year_group['Date/Time'][i]
temp_max_value = max(temp_max_value, year_group['Capital'][i])
continous = False
else:
if max_draw_down>year_group['Capital'][i]/temp_max_value-1:
if not continous:
continous = True
max_draw_down = year_group['Capital'][i]/temp_max_value-1
else:
if continous:
continous = False
start_date = current_start_date
end_date = year_group['Date/Time'][i]
yearly_drawdown[year_group['Year'][i]] = max_draw_down, start_date, end_date
yearly_return = dict() # record yearly returns
max_draw_down, temp_max_value = 0, 0
start_date, end_date, current_start_date = 0, 0, 0
continous = False # whether the drawdown run is continuing
for i in range(2, len(t),2): # even-numbered rows
if temp_max_value < t['Capital'][i-2]:
current_start_date = t['Date/Time'][i]
temp_max_value = max(temp_max_value, t['Capital'][i-2])
if max_draw_down>t['Capital'][i]/temp_max_value-1:
if not continous:
continous = True
max_draw_down = t['Capital'][i]/temp_max_value-1
else:
if continous:
continous = False
start_date = current_start_date
end_date = t['Date/Time'][i]
# Tally the yearly return
year = t['Date/Time'][i].year
yearly_return[year] = t['Net Profit - Cum Net Profit'][i]
total_return = t['Capital'][i]/t['Capital'][0]-1
yearly_return = pd.Series(yearly_return) / t['Capital'][0]
first_year = t['Date/Time'][1].year, yearly_return[t['Date/Time'][1].year]
yearly_return = | '], end= next_r | identifier_name |
summary6.1.py | all Signal types that belong to active buy entries
buy_signal_types = t[t[u"类型"] == u"买入"].groupby('Signal')
# Tally all Signal types that belong to active short-sell entries
short_signal_types = t[t['类型'] == '卖空'].groupby('Signal')
buy_signals_name_list = buy_signal_types.size()._index
short_signals_name_list = short_signal_types.size()._index
signal_class_list = []
for signal in buy_signals_name_list:
signal_class_list.append(Signal(signal))
for signal in short_signals_name_list:
signal_class_list.append(Signal(signal))
return 'Initialization complete'
def start():
'''
Main routine
-----
'''
global frequency
def daily_frequency():
global deal_list, date_list
frequency, deal_list, date_list = dict(), list(), list()
for i in range(1, len(t)):
# Count the number of trades per day
date = t['Date/Time'][i].floor('D')
date_list.append(date)
frequency[date] = frequency.get(date, 0) + 1
# Append a Deal object for this trade record
if i%2==1:
this_row = t.loc[i]; next_row = t.loc[i+1]
deal_list.append(
Deal(index=this_row['#'], dealtype = this_row['类型'],
start=this_row['Date/Time'], end= next_row['Date/Time'],
start_price=this_row['价格'], end_price=next_row['价格'],
start_signal=this_row['Signal'], end_signal=next_row['Signal'],
volume=this_row['Shares/Ctrts/Units - Profit/Loss']))
print('Adding trade record objects...')
return frequency
t0 = time.time()
init_signal_class()
print('Initialization complete!')
print('Signal name', ' '*8, 'Profit(¥)\t\tVolume\tP/V')
for signal in signal_class_list:
signal.check()
print('[', signal.name, ']', ' '*(15-len(signal.name)), round(signal.sum_profit, 2),
'\t', signal.sum_volume, '\t', round(signal.sum_profit/signal.sum_volume, 3))
frequency = pd.Series(daily_frequency())
t1 = time.time()
tpy = t1-t0
print('Summation Finished Successfully in %5.3f seconds.' % tpy)
class Deal():
def __init__(self, index, dealtype, start, end, start_signal, end_signal, volume, start_price, end_price):
'''
Long ("买入") deals: self.type = 1; anything else is treated as short: self.type = -1
'''
self.index = index
if dealtype=='买入':self.type = 1
else: self.type = -1
self.start, self.end = start, end
self.start_signal = start_signal
self.end_signal = end_signal
self.volume = volume
self.start_price, self.end_price = start_price, end_price
self.profit = (end_price-start_price) * volume * self.type
self.floating = 0
self.duration = (self.end - self.start).days  # holding period in calendar days
self.confirmed = False
def is_valid(self, today):
return (today>=self.start) & (today<self.end)
def confirm_profit(self):
self.confirmed = True
def cal_floating(self, close):
self.floating = (close - self.start_price) * self.volume * self.type
return self.floating
#%%
def 每日浮动收益统计(symbol:str, Type:str):
from data_reader import get_index_day, get_stock_day, get_index_future_day, get_comm_future_day
start_date = str(date_list[0])[:10]
end_date = str(date_list[-1])[:10]
if Type=='stock':
close = get_stock_day(symbol, start_date, end_date, freq = '1D')
elif Type=='index':
close = get_index_day(symbol, start_date, end_date, freq = '1D')
elif Type=='index_future':
close = get_index_future_day(symbol, start_date, end_date, freq = '1D')
elif Type=='comm_future':
close = get_comm_future_day(symbol, start_date, end_date, freq = '1D')
else:
print('Type is NOT acceptable, please check your input.')
quit()
close.index = close.index + pd.Timedelta(15, unit='h')
record = []
截止当日的累计盈利, comfirmed_profit = 0, 0
for day in close.index:
float_profit = 0
for deal in deal_list:
if deal.is_valid(day):
float_profit += deal.cal_floating(close.loc[day]['sclose'])
deal.floating_profit=deal.cal_floating(close.loc[day]['sclose'])
elif day.date()==deal.end.date():
# If the deal closes today, confirm (realize) its profit today
deal.confirm_profit()
comfirmed_profit+=deal.profit
#deal_list.remove(deal)
截止当日的累计盈利 = comfirmed_profit + float_profit
#print(day, int(float_profit), int(comfirmed_profit), int(截止当日的累计盈利),sep='\t')
record.append((day, float_profit, comfirmed_profit, 截止当日的累计盈利))
ans=pd.DataFrame(record,columns=('date','floating_profit','comfirmed_profit','accumlated_profit'))
ans=ans.set_index('date')
if Type.endswith('future'):
choice = input("You are using futures;\nDo you want to multiply amount by 200?\nInput 1 for YES, 0 for NO: ")
if choice=='0': future_multiplier = 1
else: future_multiplier = 200
ans[['floating_profit','comfirmed_profit','accumlated_profit']]*=future_multiplier
ans['当日盈亏']=ans.accumlated_profit.diff()
return ans
#%%
def output_to_excel():
from openpyxl.styles import Font, Border, numbers
from openpyxl.formatting.rule import DataBarRule
from openpyxl.drawing.image import Image
# %% Drawdown statistics
def drawdown_by_time(t: pd.DataFrame):
'''
Computes: maximum drawdown ratio and cumulative return
Method: simple (non-compounded) interest
Returns: max drawdown ratio, start date, end date, total return, annualized return, annualized drawdown
'''
t['Capital'] = float(t['价格'][1])+t['Net Profit - Cum Net Profit']
yearly_drawdown = dict()
t['Year']=pd.Series(t['Date/Time'][i].year for i in range(len(t)))
t_group=t.groupby('Year')
year_groups=[t_group.get_group(i) for i in t_group.groups.keys()]
for year_group in year_groups:
max_draw_down, temp_max_value = 0, 0
start_date, end_date, current_start_date = 0, 0, 0
continous = False # whether the drawdown run is continuing
for i in year_group.index:
if year_group['#'][i]>0: continue
if temp_max_value < year_group['Capital'][i]:
current_start_date = year_gr | for i in range(2, len(t),2): # even-numbered rows
if temp_max_value < t['Capital'][i-2]:
current_start_date = t['Date/Time'][i]
temp_max_value = max(temp_max_value, t['Capital'][i-2])
if max_draw_down>t['Capital'][i]/temp_max_value-1:
if not continous:
continous = True
max_draw_down = t['Capital'][i]/temp_max_value-1
else:
if continous:
continous = False
start_date = current_start_date
end_date = t['Date/Time'][i]
# Tally the yearly return
year = t['Date/Time'][i].year
yearly_return[year] = t['Net Profit - Cum Net Profit'][i]
total_return = t['Capital'][i]/t['Capital'][0]-1
yearly_return = pd.Series(yearly_return) / t['Capital'][0]
first_year = t['Date/Time'][1].year, yearly_return[t['Date/Time'][1].year]
yearly_return = | oup['Date/Time'][i]
temp_max_value = max(temp_max_value, year_group['Capital'][i])
continous = False
else:
if max_draw_down>year_group['Capital'][i]/temp_max_value-1:
if not continous:
continous = True
max_draw_down = year_group['Capital'][i]/temp_max_value-1
else:
if continous:
continous = False
start_date = current_start_date
end_date = year_group['Date/Time'][i]
yearly_drawdown[year_group['Year'][i]] = max_draw_down, start_date, end_date
yearly_return = dict() # record yearly returns
max_draw_down, temp_max_value = 0, 0
start_date, end_date, current_start_date = 0, 0, 0
continous = False # 是否连续
| identifier_body |
summary6.1.py | :用最后的净值/最先的净值作为比例。(eg. 1.06/1.02)
duration = (ts.index[-1]-ts.index[0]).days/365
# time span from the first trade to the last trade
if (duration == 0):
yearly_rate = ts[0]
# if there is only one trade, the span is 0 and cannot be used, so take that trade's return as yearly_rate
else:
yearly_rate = ratio**(1/duration)-1 # annualized return
return (yearly_rate-risk_free)/ts.std()
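# Worked example (hypothetical values): if the equity series starts at 1.02 and ends at 1.06
# two years later, ratio = 1.06/1.02 ≈ 1.0392 and duration = 2, so
# yearly_rate = 1.0392**(1/2) - 1 ≈ 1.94%; the returned value is (yearly_rate - risk_free) / ts.std().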
def write_sheet1_summation():
current_row = 0
for i in range(len(signal_class_list)):
# join the profit table and the volume table
v = signal_class_list[i].volume
p = signal_class_list[i].profit
w, wc, w_cv, l, lc, l_cv = [],[],[],[],[],[]
maxd = []
s = []
for item in (signal_class_list[i].win[ii] for ii in signal_class_list[i].win.keys()):
w.append(sum(item.values()))
wc.append(len(item))
if wc[-1] != 0:
w_cv.append(np.std(list(item.values())) / (w[-1] / wc[-1]))
else:
w_cv.append(0)
for item in (signal_class_list[i].loss[ii] for ii in signal_class_list[i].loss.keys()):
l.append(sum(item.values()))
lc.append(len(item))
if lc[-1] != 0:
l_cv.append(np.std(list(item.values()))/(l[-1] / lc[-1]))
else:
l_cv.append(0)
for item in (signal_class_list[i].timeseries[ii] for ii in signal_class_list[i].timeseries.keys()):
maxd.append(drawdown_by_signal(pd.DataFrame(item, index=range(1)).T))
s.append(sharp_ratio(pd.Series(item)))
a = pd.DataFrame({'Profit': list(p.values())},
index=pd.MultiIndex.from_product([[signal_class_list[i].name], p.keys()]))
b = pd.DataFrame({'Volume': list(v.values()), 'Max Drawdown': maxd, 'Sharp': s},
index=pd.MultiIndex.from_product([[signal_class_list[i].name], v.keys()]))
c = pd.DataFrame({'Win': w, 'Win Count': wc, 'Win CV': w_cv},
index=pd.MultiIndex.from_product([[signal_class_list[i].name], signal_class_list[i].win.keys()]))
d = pd.DataFrame({'Loss': l, 'Loss Count': lc, 'Loss CV': l_cv},
index=pd.MultiIndex.from_product([[signal_class_list[i].name], signal_class_list[i].loss.keys()]))
joint = a.join(b)
joint['P/V'] = joint['Profit']/joint['Volume']
joint = joint.join(c).join(d)
joint = joint.fillna(0)
joint['Win Rate'] = joint['Win Count'] / (joint['Win Count']+joint['Loss Count'])
joint['Expected Win'] = joint['Win'] / joint['Win Count']
joint['Expected Loss'] = joint['Loss'] / joint['Loss Count']
joint = joint.fillna(0)
joint['Expected Revenue'] = joint['Expected Win'] * joint['Win Rate'] + joint['Expected Loss'] * (1-joint['Win Rate'])
joint = joint.drop(columns=['Win', 'Loss'])
joint.to_excel(writer, 'Sheet1', startrow=current_row)
current_row += len(v.keys())+1
def beautify_excel():
'''
Beautify the output excel file and write max drawdown and related statistics into it.
'''
global result, table, nrows, font, rule, format_number, format_percent
# open the file and read the previously written output
result = openpyxl.load_workbook(folder+f+'[OUTPUT].xlsx')
table = result.worksheets[0]
nrows = table.max_row
# prepare styles (set the font, drop borders)
font = Font(name='dengxian', size=12)
rule = DataBarRule(start_type='min', start_value=0, end_type='max', end_value=90,
color="FFFF0000", showValue="None", minLength=None, maxLength=None)
format_number = numbers.BUILTIN_FORMATS[40]
format_percent = numbers.BUILTIN_FORMATS[10]
# set the sheet name and add column headers
table.title = '收益统计'
table['A1'].value = 'Open Signal'
table['B1'].value = 'Close Signal'
# drop duplicated column headers and messy borders
for row in range(nrows+1, 0, -1):
for j in range(len(table[row])):
table[row][j].border = Border(outline=False)
if table[row][j].value == table[1][j].value and row > 1:
table[row][j].value = None
# add data bars
table.conditional_formatting.add('C1:C'+str(nrows), rule)
# set column widths
table.column_dimensions['A'].width = 13
table.column_dimensions['B'].width = 13
table.column_dimensions['C'].width = 14
table.column_dimensions['D'].width = 14
table.column_dimensions['E'].width = 20
table.column_dimensions['F'].width = 20
table.column_dimensions['G'].width = 10
table.column_dimensions['H'].width = 8
table.column_dimensions['I'].width = 9.5
table.column_dimensions['J'].width = 8
table.column_dimensions['K'].width = 10
table.column_dimensions['L'].width = 9
table.column_dimensions['M'].width = 13
table.column_dimensions['N'].width = 13
table.column_dimensions['O'].width = 16
for c in ['E','H','G','J','M','N','O']:
for irow in range(2,nrows+1):
if table[c+str(irow)].value != None:
table[c+str(irow)].number_format = format_number
for c in ['D', 'L']:
for irow in range(2,nrows+1):
if table[c+str(irow)].value != None:
table[c+str(irow)].number_format = format_percent
def write_yeild_and_drawdown():
temp = drawdown_by_time(t)
table['C'+str(nrows+2)].value='最大回撤'
table['D'+str(nrows+2)].value=temp[0]
table['D'+str(nrows+2)].number_format = format_percent
table['E'+str(nrows+2)].value=temp[1]
table['F'+str(nrows+2)].value=temp[2]
yearly_drawdown = temp[5];i=0
for year in yearly_drawdown.keys():
table[nrows+3+i][2].value = year
table[nrows+3+i][3].value = yearly_drawdown[year][0]
table[nrows+3+i][3].number_format = format_percent
table[nrows+3+i][4].value = yearly_drawdown[year][1]
table[nrows+3+i][5].value = yearly_drawdown[year][2]
i+=1
table['A'+str(nrows+2)].value='累计收益率'
table['B'+str(nrows+2)].value=temp[3]
table['B'+str(nrows+2)].number_format = format_percent
yearly_return = temp[4];i=0
for year in yearly_return.keys():
table[nrows+3+i][0].value = year
table[nrows+3+i][1].value = yearly_return[year]
table[nrows+3+i][1].number_format = format_percent
i+=1
rule1 = DataBarRule(start_type='percentile', start_value=0, end_type='percentile', end_value=99,
color="FFFF0000", showValue="None", minLength=None, maxLength=60)
rule2 = DataBarRule(start_type='percentile', start_value=90, end_type='percentile', end_value=0,
color="FF22ae6b", showValue="None", minLength=None, maxLength=60)
table.conditional_formatting.add('B{}:B{}'.format(nrows+3, nrows+2+i), rule1)
table.conditional_formatting.add('D{}:D{}'.format(nrows+3, nrows+2+i), rule2)
def write_frequency(start_row, start_col):
'''
统计交易次 | 数
'''
table[start_row-1][start_col].value = '每日交易次数'
fd = frequency.describe()
for i in range(len(fd)):
table[start_row+i][start_col].value = fd.keys()[i]
table[start_row+i][start_col+1].value = fd[i]
table[start_row+i][start_col+1].number_format=format_ | conditional_block |
|
client.ts | * @returns preset query on the client.
*/
get query () {
return this._query
}
/**
* @returns preset request options on the client.
*/
get requestOptions () {
return this._requestOptions
}
/**
* Creates (by Object.create) a **new client** instance with given service methods.
* @param servicePrototype service methods that will be mount to client.
* @param servicehost service host for new client.
* @returns a **new client** with with given service methods.
*/
withService<T> (serviceMethod: T, servicehost: string = ''): this & T {
const srv = Object.assign<this, T>(Object.create(this), serviceMethod)
if (servicehost !== '') {
srv._host = servicehost
}
return srv
}
/**
* Creates (by Object.create) a **new client** instance with given request options.
* @param options request options that will be copy into client.
* @returns a **new client** with with given request options.
*/
withOptions (options: RequestOptions): this {
return Object.assign(Object.create(this), {
_requestOptions: Object.assign({}, this._requestOptions, options),
})
}
/**
* Creates (by Object.create) a **new client** instance with given headers.
* @param headers headers that will be copy into client.
* @returns a **new client** with with given headers.
*/
withHeaders (headers: Payload): this {
return Object.assign(Object.create(this), {
_headers: Object.assign({}, this._headers, headers),
})
}
/**
* Creates (by Object.create) a **new client** instance with headers copy from the request.
* @param req IncomingMessage object that headers read from.
* @param headers headers that will be copy into client.
* @returns a **new client** with with given headers.
*/
forwardHeaders (req: IncomingMessage | any, ...headers: string[]): this {
if (req.req != null && req.req.headers != null) {
req = req.req
}
if (headers.length === 0) {
headers = FORWARD_HEADERS
}
const forwardHeaders: { [key: string]: string | string[] } = {}
for (const header of headers) {
if (req.headers[header] != null) {
forwardHeaders[header] = req.headers[header]
}
}
return this.withHeaders(forwardHeaders)
}
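// Usage sketch (ctx and client are assumed to exist in the calling service):
//   const scoped = client.forwardHeaders(ctx.req, 'x-request-id', 'x-canary')
// copies only the named headers (or FORWARD_HEADERS by default) onto a derived client.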
/**
* Creates (by Object.create) a **new client** instance with given query.
* @param query query that will be copy into client.
* @returns a **new client** with with given query.
*/
withQuery (query: Payload): this {
return Object.assign(Object.create(this), {
_query: Object.assign({}, this._query, query),
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Tenant-Id` and `X-Tenant-Type`.
* @param tenantId that will be added to header as `X-Tenant-Id`.
* @param tenantType that will be added to header as `X-Tenant-Type`.
* @returns a **new client** with with given headers.
*/
withTenant (tenantId: string, tenantType = 'organization') {
return this.withHeaders({
'X-Tenant-Id': tenantId,
'X-Tenant-Type': tenantType,
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Operator-ID`.
* @param operatorId that will be added to header as `X-Operator-ID`.
* @returns a **new client** with with given headers.
*/
withOperator (operatorId: string) {
return this.withHeaders({
'X-Operator-ID': operatorId,
})
}
/**
* Creates a JWT token string with given payload and client's appSecrets.
* @param payload Payload to sign, should be an literal object.
* @param options some JWT sign options.
* @returns a token string.
*/
signToken (payload: Payload, options?: jwt.SignOptions) {
return jwt.sign(payload, this._options.appSecrets[0], options)
}
/**
* Creates a periodical changed JWT token string with appId and appSecrets.
* @param payload Payload to sign, should be an literal object.
* @param periodical period in seccond, default to 3600s.
* @param options some JWT sign options.
* @returns a token string.
*/
signAppToken (periodical: number = 3600, options?: jwt.SignOptions) {
const iat = Math.floor(Date.now() / (1000 * periodical)) * periodical
const payload = {
iat,
exp: iat + Math.floor(1.1 * periodical),
_appId: this._options.appId,
}
// token change in every hour, optimizing for server cache.
return this.signToken(payload, options)
}
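// Note: because iat is floored to the periodical boundary, calls made within the same period
// return an identical token (sketch):
//   const a = client.signAppToken(); const b = client.signAppToken() // equal within the hour
// which lets downstream services cache token verification.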
/**
* Decode a JWT token string to literal object payload.
* @param token token to decode.
* @param options some JWT decode options.
* @returns a literal object.
*/
decodeToken (token: string, options?: jwt.DecodeOptions): Payload {
return jwt.decode(token, options) as Payload
}
/**
* Decode and verify a JWT token string to literal object payload.
* if verify failure, it will throw a 401 error (creates by 'http-errors' module)
* @param token token to decode.
* @param options some JWT verify options.
* @returns a literal object.
*/
verifyToken (token: string, options?: jwt.VerifyOptions): Payload {
let error = null
for (const secret of this._options.appSecrets) {
try {
return jwt.verify(token, secret, options) as Payload
} catch (err) {
error = err
}
}
throw createError(401, error)
}
/**
* request with given method, url and data.
* It will genenrate a jwt token by signToken, and set to 'Authorization' header.
* It will merge headers, query and request options that preset into client.
* @param method method to request.
* @param url url to request, it will be resolved with client host.
* @param data data to request.
* @returns a promise with Response
*/
request (method: string, url: string, data?: any) {
// token change in every hour, optimizing for server cache.
const token = this.signAppToken()
const options: RequestOptions & request.UrlOptions = Object.assign({ url: '' }, this._requestOptions)
options.method = method.toUpperCase()
options.url = urlJoin(this._host, url)
options.qs = Object.assign({}, options.qs, this._query)
options.headers =
Object.assign({}, options.headers, this._headers, { Authorization: `Bearer ${token}` })
if (data != null) {
if (options.method === 'GET') {
options.qs = Object.assign(options.qs, data)
} else {
options.body = data
}
}
return Client.request(options).then((resp) => {
if (resp.statusCode === 200 && Number(resp.headers['x-http-status']) > 0) {
resp.statusCode = Number(resp.headers['x-http-status'])
}
return resp
})
}
/**
* request with `GET` method.
* @returns a promise with Response body
*/
get<T> (url: string, data?: any) {
return this.request('GET', url, data).then(assertRes) as Promise<T>
}
/**
* request with `POST` method.
* @returns a promise with Response body
*/
post<T> (url: string, data?: any) {
return this.request('POST', url, data).then(assertRes) as Promise<T>
}
/**
* request with `PUT` method.
* @returns a promise with Response body
*/
put<T> (url: string, data?: any) {
return this.request('PUT', url, data).then(assertRes) as Promise<T>
}
/**
* request with `PATCH` method.
* @returns a promise with Response body
*/
patch<T> (url: string, data?: any) {
return this.request('PATCH', url, data).then(assertRes) as Promise<T>
}
/**
* request with `DELETE` method.
* @returns a promise with Response body
*/
delete<T> (url: string, data?: any) {
return this.request('DELETE', url, data).then(assertRes) as Promise<T>
}
}
/**.
* @returns true if the response's statusCode is in [200, 300)
*/
export function isSuccess (res: request.RequestResponse) {
return res.statusCode >= 200 && res.statusCode < 300
}
/**.
* @returns a promise that resolves after the given ms delay.
*/
export function delay (ms: number) | {
return new Promise((resolve) => $setTimeout(resolve, ms))
} | identifier_body |
|
client.ts | null ? Math.floor(options.retryDelay) : 2000
const maxAttempts = options.maxAttempts != null ? Math.floor(options.maxAttempts) : 3
const retryErrorCodes = Array.isArray(options.retryErrorCodes) ? options.retryErrorCodes : RETRIABLE_ERRORS
// default to `false`
options.followRedirect = options.followRedirect === true
let err = null
let attempts = 0
while (attempts < maxAttempts) {
attempts++
try {
const res = await new Promise<request.Response>((resolve, reject) => {
request(options, (error: any, response: request.Response, _body: any) => {
if (error != null) {
reject(error)
} else {
resolve(response)
}
})
})
return Object.assign(res, {
attempts,
originalUrl: options.url as string,
originalMethod: options.method as string,
})
} catch (e) {
err = e
if (!retryErrorCodes.includes(e.code)) {
break
}
await delay(retryDelay)
}
}
throw Object.assign(err, {
attempts,
originalUrl: options.url,
originalMethod: options.method,
})
}
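// Behaviour sketch: only transport-level failures whose error code is in retryErrorCodes
// (ECONNRESET and friends) are retried, up to maxAttempts with retryDelay ms between tries;
// HTTP error statuses resolve normally and are left to isSuccess/assertRes to handle, e.g.
//   const res = await Client.request({ url: 'https://example.com/ping', method: 'GET', maxAttempts: 2 })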
private _options: ClientOptions
private _host: string
private _headers: Payload
private _query: Payload
private _requestOptions: RequestOptions
constructor (options: ClientOptions & RetryOptions) {
if (!MONGO_REG.test(options.appId)) {
throw new Error(`appId: ${options.appId} is not a valid mongo object id`)
}
if (!Array.isArray(options.appSecrets) || options.appSecrets.length === 0) {
throw new Error(`appSecrets required`)
}
if (typeof options.host !== 'string' || options.host === '') {
throw new Error(`host required`)
}
options.timeout = options.timeout == null ? 3000 : options.timeout
options.pool = options.pool == null ?
{ maxSockets: options.maxSockets == null ? 100 : options.maxSockets } : options.pool
options.strictSSL = options.strictSSL === true
options.retryDelay = options.retryDelay == null ? 2000 : options.retryDelay
options.maxAttempts = options.maxAttempts == null ? 3 : options.maxAttempts
this._options = options
this._host = options.host
this._headers = { 'User-Agent': UA }
this._query = {}
this._requestOptions = {
json: true,
forever: true,
strictSSL: options.strictSSL,
timeout: options.timeout,
cert: options.certChain,
key: options.privateKey,
ca: options.rootCert,
pool: options.pool,
time: options.time,
retryDelay: options.retryDelay,
maxAttempts: options.maxAttempts,
retryErrorCodes: options.retryErrorCodes,
useQuerystring: options.useQuerystring,
} as RequestOptions
}
/**
* @returns User-Agent on the client.
*/
get UA (): string {
const ua = this._headers['User-Agent']
return ua == null ? '' : ua
}
/**
* Set User-Agent to the client.
* @param ua User-Agent string.
*/
set UA (ua: string) {
this._headers['User-Agent'] = ua
}
/**
* @returns host on the client.
*/
get host () {
return this._host
}
/**
* @returns preset headers on the client.
*/
get headers () {
return this._headers
}
/**
* @returns preset query on the client.
*/
get query () {
return this._query
}
/**
* @returns preset request options on the client.
*/
get requestOptions () {
return this._requestOptions
}
/**
* Creates (by Object.create) a **new client** instance with given service methods.
* @param servicePrototype service methods that will be mount to client.
* @param servicehost service host for new client.
* @returns a **new client** with with given service methods.
*/
withService<T> (serviceMethod: T, servicehost: string = ''): this & T {
const srv = Object.assign<this, T>(Object.create(this), serviceMethod)
if (servicehost !== '') {
srv._host = servicehost
}
return srv
}
/**
* Creates (by Object.create) a **new client** instance with given request options.
* @param options request options that will be copy into client.
* @returns a **new client** with with given request options.
*/
withOptions (options: RequestOptions): this {
return Object.assign(Object.create(this), {
_requestOptions: Object.assign({}, this._requestOptions, options),
})
}
/**
* Creates (by Object.create) a **new client** instance with given headers.
* @param headers headers that will be copy into client.
* @returns a **new client** with with given headers.
*/
withHeaders (headers: Payload): this {
return Object.assign(Object.create(this), {
_headers: Object.assign({}, this._headers, headers),
})
}
/**
* Creates (by Object.create) a **new client** instance with headers copy from the request.
* @param req IncomingMessage object that headers read from.
* @param headers headers that will be copy into client.
* @returns a **new client** with with given headers.
*/
forwardHeaders (req: IncomingMessage | any, ...headers: string[]): this {
if (req.req != null && req.req.headers != null) {
req = req.req
}
if (headers.length === 0) {
headers = FORWARD_HEADERS
}
const forwardHeaders: { [key: string]: string | string[] } = {}
for (const header of headers) {
if (req.headers[header] != null) {
forwardHeaders[header] = req.headers[header]
}
}
return this.withHeaders(forwardHeaders)
}
/**
* Creates (by Object.create) a **new client** instance with given query.
* @param query query that will be copy into client.
* @returns a **new client** with with given query.
*/
withQuery (query: Payload): this {
return Object.assign(Object.create(this), {
_query: Object.assign({}, this._query, query),
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Tenant-Id` and `X-Tenant-Type`.
* @param tenantId that will be added to header as `X-Tenant-Id`.
* @param tenantType that will be added to header as `X-Tenant-Type`.
* @returns a **new client** with with given headers.
*/
withTenant (tenantId: string, tenantType = 'organization') {
return this.withHeaders({
'X-Tenant-Id': tenantId,
'X-Tenant-Type': tenantType,
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Operator-ID`.
* @param operatorId that will be added to header as `X-Operator-ID`.
* @returns a **new client** with with given headers.
*/
withOperator (operatorId: string) {
return this.withHeaders({
'X-Operator-ID': operatorId,
})
}
/**
* Creates a JWT token string with given payload and client's appSecrets.
* @param payload Payload to sign, should be an literal object.
* @param options some JWT sign options.
* @returns a token string.
*/
signToken (payload: Payload, options?: jwt.SignOptions) {
return jwt.sign(payload, this._options.appSecrets[0], options)
}
/**
* Creates a periodical changed JWT token string with appId and appSecrets.
* @param payload Payload to sign, should be an literal object.
* @param periodical period in seccond, default to 3600s.
* @param options some JWT sign options.
* @returns a token string.
*/
| (periodical: number = 3600, options?: jwt.SignOptions) {
const iat = Math.floor(Date.now() / (1000 * periodical)) * periodical
const payload = {
iat,
exp: iat + Math.floor(1.1 * periodical),
_appId: this._options.appId,
}
// token change in every hour, optimizing for server cache.
return this.signToken(payload, options)
}
/**
* Decode a JWT token string to literal object payload.
* @param token token to decode.
* @param options some JWT decode options.
* @returns a literal object.
*/
decodeToken (token: string, options?: jwt.DecodeOptions): Payload {
return jwt.decode(token, options) as Payload
}
/**
* Decode and verify a JWT token string to literal object payload.
* if verify failure, it will throw a 401 error (creates by 'http-errors' module)
* @param token token to decode.
* @param options some | signAppToken | identifier_name |