file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---
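Each row below is a fill-in-the-middle (FIM) sample: one source file split into a prefix, a suffix, and the held-out middle, with `|` separating the columns. A minimal reassembly sketch, assuming this standard FIM layout (the dict keys mirror the column names above):

def reassemble(row):
    # The original file is simply prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]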
rate_limit_config_reconciler.sk.go
|
// Code generated by solo-kit. DO NOT EDIT.
package v1alpha1
import (
"github.com/solo-io/go-utils/contextutils"
"github.com/solo-io/solo-kit/pkg/api/v1/clients"
"github.com/solo-io/solo-kit/pkg/api/v1/reconcile"
"github.com/solo-io/solo-kit/pkg/api/v1/resources"
)
// Option to copy anything from the original to the desired before writing. Return value of false means don't update
type TransitionRateLimitConfigFunc func(original, desired *RateLimitConfig) (bool, error)
type RateLimitConfigReconciler interface {
Reconcile(namespace string, desiredResources RateLimitConfigList, transition TransitionRateLimitConfigFunc, opts clients.ListOpts) error
}
func rateLimitConfigsToResources(list RateLimitConfigList) resources.ResourceList {
|
for _, rateLimitConfig := range list {
resourceList = append(resourceList, rateLimitConfig)
}
return resourceList
}
func NewRateLimitConfigReconciler(client RateLimitConfigClient) RateLimitConfigReconciler {
return &rateLimitConfigReconciler{
base: reconcile.NewReconciler(client.BaseClient()),
}
}
type rateLimitConfigReconciler struct {
base reconcile.Reconciler
}
func (r *rateLimitConfigReconciler) Reconcile(namespace string, desiredResources RateLimitConfigList, transition TransitionRateLimitConfigFunc, opts clients.ListOpts) error {
opts = opts.WithDefaults()
opts.Ctx = contextutils.WithLogger(opts.Ctx, "rateLimitConfig_reconciler")
var transitionResources reconcile.TransitionResourcesFunc
if transition != nil {
transitionResources = func(original, desired resources.Resource) (bool, error) {
return transition(original.(*RateLimitConfig), desired.(*RateLimitConfig))
}
}
return r.base.Reconcile(namespace, rateLimitConfigsToResources(desiredResources), transitionResources, opts)
}
|
var resourceList resources.ResourceList
|
mod.rs
|
mod project_task;
use crate::{Client, Result};
use project_task::ProjectTask;
|
let project_assignments = client.get_project_assignments().await?;
let project_tasks = ProjectTask::from(&project_assignments);
for project_task in project_tasks {
println!("{}", project_task);
}
Ok(())
}
|
pub async fn list(client: &Client) -> Result<()> {
|
code_cr.py
|
import re
import numpy as np
import pandas as pd
import requests # HTTP client for crawling
import json
from pmdarima.arima import ndiffs
import pmdarima as pm
from pykrx import stock
from bs4 import BeautifulSoup
import html5lib
# ==============
# Industry classification
# ==============
# -------- List companies in the same industry
# TODO(incomplete): same-industry selection
def select_same_industry(corp_name):
indus=com_df[com_df['nm']==corp_name]['industry'].values[0] # TODO: verify com_df
# print(com_df.groupby(by='industry')['nm'].nunique().max()) # at most 151 firms share an industry -> crawl 151 financial statements?
list_com=com_df[com_df['industry']==indus]['corp_name'].values.tolist()
return list_com
# -------- Related-company codes from Naver Finance (hjh)
def relate_code_crawl(co):
# Load the page that lists related ticker codes
url='https://finance.naver.com/item/main.naver?code='+str(co)
page=pd.read_html(url,encoding='CP949')
# Extract the related names and ticker codes (code_list[0] is the '종목명' name header, so it is dropped)
code_list=page[4].columns.tolist()
code_list=code_list[1:]
# Return the list of ticker codes
codes=[]
for word in (code_list):
codes.append(word[-6:])
#print(codes)
return codes
#relate_code_crawl('000660')
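# Usage sketch (assumes Naver Finance still serves the related-tickers table at
# page[4] with six-digit codes embedded in the headers; the output is illustrative):
# related = relate_code_crawl('000660')
# print(related)  # e.g. ['005930', '000990', ...] -- six-digit ticker codes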
# ==============
# Company name/code conversion
# ==============
# -------- Convert to the gicode used for financial-statement crawling
def nm_to_bs_gicode(corp_name):
gi=com_df[com_df['nm']==corp_name]['cd']
gi=gi.values[0]
return gi
def stc_code_to_bs_gicode(stock_code):
gi = com_df[com_df['stock_code'] == stock_code]['cd']
gi = gi.values[0]
return gi
def yh_code_to_bs_gicode(yh_code):
gi = com_df[com_df['yh_code'] == yh_code]['cd']
gi = gi.values[0]
return gi
# -------- Convert to the gicode used for Naver Finance crawling
def nm_to_fn_gicode(corp_name):
gi=com_df[com_df['nm']==corp_name]['stock_code']
gi=gi.values[0]
return gi
def yh_code_to_fn_gicode(yh_code):
gi=com_df[com_df['yh_code']==yh_code]['stock_code']
gi=gi.values[0]
return gi
# -------- Convert a code to a company name
def stc_code_to_nm(stock_code):
gi = com_df[com_df['stock_code'] == stock_code]['nm']
gi = gi.values[0]
return gi
def yh_code_to_nm(yh_code):
gi = com_df[com_df['yh_code'] == yh_code]['nm']
gi = gi.values[0]
return gi
# ==============
# Data collection
# ==============
# -------- Balance Sheets API call
# def bs_api(corp_name=None, yh_code=None, stock_code=None):
# print('haha')
# -------- Balance Sheets Crawling (financial statements)
# Revised 220220:
# 1) parameters reduced to stock_code
# 2) return the full list of DataFrames instead of selecting one table via kind
# 3) stripping the 'expand accounts used in calculation' suffix is now optional
def bs_craw(stock_code, clear_name=False): # ------- input variable comes from the search feature
"""
# kind
: 0 (์ฐ๊ฐ ํฌ๊ด์์ต๊ณ์ฐ์), 1 (๋ถ๊ธฐ๋ณ ํฌ๊ด์์ต๊ณ์ฐ์)
2 (์ฐ๊ฐ ์ฌ๋ฌด์ํํ), 3 (๋ถ๊ธฐ๋ณ ์ฌ๋ฌด์ํํ)
4 (์ฐ๊ฐ ํ๊ธํ๋ฆํ), 5 (๋ถ๊ธฐ๋ณ ํ๊ธํ๋ฆํ)
"""
# ------- convert the searched input to a gicode (the company code used by the site)
gcode = stc_code_to_bs_gicode(stock_code)
url = f"http://comp.fnguide.com/SVO2/ASP/SVD_Finance.asp?NewMenuID=103&gicode={gcode}"
table_list = pd.read_html(url, encoding='UTF-8')
# Strip the unneeded '계산에 참여한 계정 펼치기' (expand accounts) text from account names
if not clear_name:
return table_list
else:
new_table_list = []
for tbl in table_list:
for i, idx in enumerate(tbl.iloc[:, 0]):
m = idx.replace('계산에 참여한 계정 펼치기', '')
tbl.iloc[i, 0] = m
new_table_list.append(tbl)
return new_table_list
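# Usage sketch for bs_craw, assuming fnguide keeps the table order listed in the
# docstring (index 0 = annual comprehensive income statement):
# tables = bs_craw('005930', clear_name=True)
# print(tables[0].head())  # annual income statement, account-name suffixes stripped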
# ------- Naver Finance
# Revised 220220:
# 1) parameters reduced to stock_code
# 2) return the full list of DataFrames instead of selecting one table via kind
def fn_craw(stock_code):
"""
# kind
: 0 (์ ์ผ&๋น์ผ ์ํ๊ฐ, ํํ๊ฐ, ๊ฑฐ๋๋ ๋ฑ) #TODO ๊ฐ๊ณต ํ์
1 (์ฆ๊ถ์ฌ ๋ณ ๋งค๋ ๋งค์ ์ ๋ณด) #TODO ๊ฐ๊ณต ํ์(์ปฌ๋ผ์ด๋ฆ)
2 (์ธ๊ตญ์ธ, ๊ธฐ๊ด ๊ฑฐ๋ ์ ๋ณด) #TODO ๊ฐ๊ณต ํ์
3 (๊ธฐ์
์ค์ ๋ถ์(์ฐ๋๋ณ ๋ถ๊ธฐ๋ณ ์ฃผ์์ฌ๋ฌด ์ ๋ณด)) #TODO ๊ฐ๊ณต ํ์?
4 (๋์ผ์
์ข
๋น๊ต) #TODO ๊ฐ๊ณต ํ์?
5 (์๊ฐ์ด์ก, ์ฃผ์์, ์ก๋ฉด๊ฐ ์ ๋ณด) #TODO ๊ฐ๊ณต ํ์
6 (์ธ๊ตญ์ธ ์ฃผ์ ํ๋, ๋ณด์ ์ ๋ณด)
7 (๋ชฉํ์ฃผ๊ฐ ์ ๋ณด) #TODO ๊ฐ๊ณต ํ์
8 (PER, PBR ๋ฐฐ๋น์์ต๋ฅ ์ ๋ณด) (์ฃผ๊ฐ ๋ฐ๋ผ ๋ณ๋) #TODO ๊ฐ๊ณต ํ์
9 (๋์ผ์
์ข
PER, ๋ฑ๋ฝ๋ฅ ์ ๋ณด) #TODO ๊ฐ๊ณต ํ์
10 (ํธ๊ฐ 10๋จ๊ณ)
11 (์ธ๊ธฐ ๊ฒ์ ์ข
๋ชฉ: ์ฝ์คํผ) #TODO ๊ฐ๊ณต ํ์
12 (์ธ๊ธฐ ๊ฒ์ ์ข
๋ชฉ: ์ฝ์ค๋ฅ) #TODO ๊ฐ๊ณต ํ์
"""
gcode = str(stock_code)
url = f"https://finance.naver.com/item/main.naver?code={gcode}"
table_list = pd.read_html(url, encoding='euc-kr')
return table_list
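# Usage sketch for fn_craw; table 3 is the performance table the indicator
# functions below rely on (the column layout is whatever Naver currently serves):
# perf = fn_craw('005930')[3]
# print(perf.columns)  # expect '최근 연간 실적' / '최근 분기 실적' header groups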
# ==============
# Indicator computation
# ==============
# 220222 weather revision start ---------------------------------------------
# -------- Indicator computation
# Revised 220220:
# 1) parameters reduced to stock_code
# 2) kind removed; tables are picked directly since a list of DataFrames is returned
# 3) '-' values in sil_df_y / sil_df_q cleaned with lambda + re.sub instead of if branches
# 4) return an array instead of a dict, plus the company name (nm)
def idv_radar_weather_data(stock_code):
"""
# <์งํ ์ค๋ช
>
# 1. ๋ฐฐ๋น ๋ถ์ -> ๋ฐฐ๋น์ฑํฅ(๋ฐฐ๋น ์ปค๋ฒ๋ฆฌ์ง์ ์ญ์.)
# 2. ์ ๋์ฑ ๋ถ์(๋จ๊ธฐ์ฑ๋ฌด์ง๊ธ๋ฅ๋ ฅ) -> ๋น์ข๋น์จ(๋น์ข์์ฐ / ์ ๋๋ถ์ฑ)
# 3. ์ฌ๋ฌด๊ฑด์ ์ฑ ๋ถ์(๋ ๋ฒ๋ฆฌ์ง ๋น์จ) -> ๋ถ์ฑ๋น์จ(์ด๋ถ์ฑ / ์๊ธฐ์๋ณธ)์ ์ญ์
# 4. ์์ต์ฑ๋ถ์ -> ๋งค์ถ์์ต์ฑ(๋น๊ธฐ์์ด์ต/๋งค์ถ์ก))
# 5. ์ฑ์ฅ์ฑ๋ถ์ -> ์์ด์ต์ฑ์ฅ๋ฅ
"""
gcode = stock_code
nm = stc_code_to_nm(stock_code)
sil_df = fn_craw(gcode)[3] # 3: company performance table (revised 220220)
foreign_ms = fn_craw(gcode)[2].loc[1, '외국인'] # 2: foreign & institutional trading info
giguan_ms = fn_craw(gcode)[2].loc[1, '기관'] # 2: foreign & institutional trading info
if (sil_df.iloc[0:8, 3].isna().sum()) > 0: # slice check: newly listed firms may lack annual data
pass
elif (sil_df.iloc[0:8, 9].isna().sum()) > 0: # slice check: newly listed firms may lack quarterly data
pass
else:
# 0. Financials are taken from the latest quarterly disclosure
# 0. Dividends are paid once a year, though, so the latest annual disclosure is used for them
sil_df_y = sil_df['최근 연간 실적'].iloc[:, 2] # slower, but .iloc is used because disclosure dates differ across firms
sil_df_q = sil_df['최근 분기 실적'].iloc[:, 4]
sil_df_y = sil_df_y.fillna(0)
sil_df_q = sil_df_q.fillna(0)
if sil_df_y.dtype == 'O':
sil_df_y = sil_df_y.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
sil_df_y = sil_df_y.astype('float')
if sil_df_q.dtype == 'O':
sil_df_q = sil_df_q.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
sil_df_q = sil_df_q.astype('float')
# 1. Payout ratio (bd_tend)
bd_tend = sil_df_y[15] # actual dividend payout ratio
# 2. Liquidity - quick ratio (quick assets / current liabilities)
# quick assets = (current assets - inventories)
dj_rate = sil_df_q[7] # quick ratio
# 3. Financial soundness - reciprocal of the debt ratio (total debt / equity)
bch_rate = sil_df_q[6] / 100 # debt ratio
bch_rate = round((1 / bch_rate) * 100, 2)
# 4. Profitability - net margin (net income / revenue) # TODO: what about firms with zero revenue?
dg_bene = sil_df_q[2]
mch = sil_df_q[0]
suyk = round((dg_bene / mch) * 100, 2)
# 5. Growth - sustainable growth rate
# (1 - payout ratio) * return on equity (ROE)
# retention ratio
roe = sil_df_y[5] / 100
ubo = (100 - bd_tend) / 100
grth = round(roe * ubo * 100, 2)
data_arr = np.array([bd_tend, dj_rate, bch_rate, suyk, grth])
# weather part----------------
# PER?
weather_per = sil_df_y[10]
# PBR
weather_pbr = sil_df_y[12]
# ROE
weather_roe = sil_df_y[5]
# EPS
weather_eps = sil_df_y[9]
# BPS
weather_bps = sil_df_y[11]
# array
weather_arr = np.array([weather_per, weather_pbr, weather_roe, weather_eps, weather_bps])
return data_arr, weather_arr, nm, foreign_ms, giguan_ms
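# Worked example of the five indicator formulas with toy numbers (not real filings):
# payout = 20.0                                            # dividend payout ratio (%)
# quick_ratio = 150.0                                      # quick assets / current liabilities (%)
# soundness = round((1 / (50.0 / 100)) * 100, 2)           # inverse of a 50% debt ratio -> 200.0
# net_margin = round((10000 / 80000) * 100, 2)             # net income / revenue -> 12.5
# growth = round(0.15 * ((100 - payout) / 100) * 100, 2)   # ROE * retention -> 12.0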
# revision start
# -------- Related-company indicator computation (relative-ratio basis)
# Revised 220220:
# 1) parameters reduced to stock_code
# 2) return an array instead of a dict, plus the company name (nm)
# 220222 weather
def relate_radar_weather_data(stock_code):
label_list = ['배당성향', '유동성', '건전성', '수익성', '성장성'] # payout, liquidity, soundness, profitability, growth
arr_list = []
# convert to ticker code and name
gcode = stock_code
relate_corp = relate_code_crawl(co=gcode)
# up to five related companies may come back
arr_list = [idv_radar_weather_data(stock_code=stcd) for stcd in relate_corp]
# split the data out of arr_list
radar_list = [x[0] for x in arr_list if x is not None]
weather_list = [x[1] for x in arr_list if x is not None]
nm_list = [x[2] for x in arr_list if x is not None]
# foreign buying, institutional buying
try:
foreign_ms = arr_list[0][3]
except TypeError:
foreign_ms=0.01
try:
giguan_ms = arr_list[0][4]
except TypeError:
giguan_ms=0.01
# radar_chart_data
radar_list = np.array(radar_list)
radar_list[:, 0] = (radar_list[:, 0] / radar_list[:, 0].mean()) * 100
radar_list[:, 1] = (radar_list[:, 1] / radar_list[:, 1].mean()) * 100
radar_list[:, 2] = (radar_list[:, 2] / radar_list[:, 2].mean()) * 100
radar_list[:, 3] = (radar_list[:, 3] / radar_list[:, 3].mean()) * 100
radar_list[:, 4] = (radar_list[:, 4] / radar_list[:, 4].mean()) * 100
# radar_chart_dict
radar_dict_list = []
for i, nm in enumerate(nm_list):
dic = {}
dic[nm] = radar_list[i, :].tolist()
radar_dict_list.append(dic)
# weather_chart_data
weather_list = np.array(weather_list)
weather_list[:, 0] = (weather_list[:, 0] / weather_list[:, 0].mean()) # each firm's PER vs. the group mean
weather_list[:, 1] = (weather_list[:, 1] / weather_list[:, 1].mean()) # PBR vs. the group mean
weather_list[:, 2] = (weather_list[:, 2] / weather_list[:, 2].mean()) # ROE vs. the group mean
weather_list[:, 3] = (weather_list[:, 3] / weather_list[:, 3].mean()) # EPS vs. the group mean
weather_list[:, 4] = (weather_list[:, 4] / weather_list[:, 4].mean()) # BPS vs. the group mean
weather_list=np.round(weather_list, 2)
return label_list, radar_dict_list, weather_list[0], foreign_ms, giguan_ms
# 220222 weather revision end ---------------------------------------------
# ==============
# Indicator computation
# ==============
# -------- Indicator computation
# Revised 220220:
# 1) parameters reduced to stock_code
# 2) kind removed; tables are picked directly since a list of DataFrames is returned
# 3) '-' values in sil_df_y / sil_df_q cleaned with lambda + re.sub instead of if branches
# 4) return an array instead of a dict, plus the company name (nm)
def idv_radar_data(stock_code):
"""
# <์งํ ์ค๋ช
>
# 1. ๋ฐฐ๋น ๋ถ์ -> ๋ฐฐ๋น์ฑํฅ(๋ฐฐ๋น ์ปค๋ฒ๋ฆฌ์ง์ ์ญ์.)
# 2. ์ ๋์ฑ ๋ถ์(๋จ๊ธฐ์ฑ๋ฌด์ง๊ธ๋ฅ๋ ฅ) -> ๋น์ข๋น์จ(๋น์ข์์ฐ / ์ ๋๋ถ์ฑ)
# 3. ์ฌ๋ฌด๊ฑด์ ์ฑ ๋ถ์(๋ ๋ฒ๋ฆฌ์ง ๋น์จ) -> ๋ถ์ฑ๋น์จ(์ด๋ถ์ฑ / ์๊ธฐ์๋ณธ)์ ์ญ์
# 4. ์์ต์ฑ๋ถ์ -> ๋งค์ถ์์ต์ฑ(๋น๊ธฐ์์ด์ต/๋งค์ถ์ก))
# 5. ์ฑ์ฅ์ฑ๋ถ์ -> ์์ด์ต์ฑ์ฅ๋ฅ
"""
gcode = stock_code
nm = stc_code_to_nm(stock_code)
sil_df = fn_craw(gcode)[3] # 3: company performance table (revised 220220)
if (sil_df.iloc[0:8, 3].isna().sum()) > 0: # slice check: newly listed firms may lack annual data
pass
elif (sil_df.iloc[0:8, 9].isna().sum()) > 0: # slice check: newly listed firms may lack quarterly data
pass
else:
# 0. Financials are taken from the latest quarterly disclosure
# 0. Dividends are paid once a year, though, so the latest annual disclosure is used for them
sil_df_y = sil_df['최근 연간 실적'].iloc[:, 2] # slower, but .iloc is used because disclosure dates differ across firms
sil_df_q = sil_df['최근 분기 실적'].iloc[:, 4]
sil_df_y = sil_df_y.fillna(0)
sil_df_q = sil_df_q.fillna(0)
if sil_df_y.dtype == 'O':
sil_df_y = sil_df_y.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
sil_df_y = sil_df_y.astype('float')
if sil_df_q.dtype == 'O':
sil_df_q = sil_df_q.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
sil_df_q = sil_df_q.astype('float')
# 1. Payout ratio (bd_tend)
bd_tend = sil_df_y[15] # actual dividend payout ratio
# 2. Liquidity - quick ratio (quick assets / current liabilities)
# quick assets = (current assets - inventories)
dj_rate = sil_df_q[7] # quick ratio
# 3. Financial soundness - reciprocal of the debt ratio (total debt / equity)
bch_rate = sil_df_q[6] / 100 # debt ratio
bch_rate = round((1 / bch_rate) * 100, 2)
# 4. Profitability - net margin (net income / revenue) # TODO: what about firms with zero revenue?
dg_bene = sil_df_q[2]
mch = sil_df_q[0]
suyk = round((dg_bene / mch) * 100, 2)
# 5. Growth - sustainable growth rate
# (1 - payout ratio) * return on equity (ROE)
# retention ratio
roe = sil_df_y[5] / 100
ubo = (100 - bd_tend) / 100
grth = round(roe * ubo * 100, 2)
data_arr = np.array([bd_tend, dj_rate, bch_rate, suyk, grth])
return data_arr, nm
# -------- Related-company indicator computation (relative-ratio basis)
# Revised 220220:
# 1) parameters reduced to stock_code
# 2) return an array instead of a dict, plus the company name (nm)
def relate_radar_data(stock_code):
label_list = ['배당성향', '유동성', '건전성', '수익성', '성장성'] # payout, liquidity, soundness, profitability, growth
arr_list = []
# convert to ticker code and name
gcode = stock_code
relate_corp = relate_code_crawl(co=gcode)
arr_list = [idv_radar_data(stock_code=stcd) for stcd in relate_corp]
nm_list = [x[1]
|
ist if x is not None]
arr_list = [x[0] for x in arr_list if x is not None]
arr_list = np.array(arr_list)
arr_list[:, 0] = (arr_list[:, 0] / arr_list[:, 0].mean()) * 100
arr_list[:, 1] = (arr_list[:, 1] / arr_list[:, 1].mean()) * 100
arr_list[:, 2] = (arr_list[:, 2] / arr_list[:, 2].mean()) * 100
arr_list[:, 3] = (arr_list[:, 3] / arr_list[:, 3].mean()) * 100
arr_list[:, 4] = (arr_list[:, 4] / arr_list[:, 4].mean()) * 100
dict_list = []
for i, nm in enumerate(nm_list):
dic = {}
dic[nm] = arr_list[i, :].tolist()
dict_list.append(dic)
return label_list, dict_list
# -------- Related-company indicator computation (original)
# def relate_radar_data(yh_code=None, corp_name=None, stock_code=None):
# label_list=['배당성향', '유동성', '건전성', '수익성', '성장성']
# dict_list = []
#
# # convert to ticker code
# gcode = 0
# if yh_code != None:
# gcode = yh_code_to_fn_gicode(yh_code)
# elif corp_name != None:
# gcode = nm_to_fn_gicode(corp_name)
# elif stock_code != None:
# gcode = stock_code
#
# relate_corp = relate_code_crawl(co=gcode)
#
# dict_list = [idv_radar_data(stock_code=stcd) for stcd in relate_corp]
#
# dict_list = [x for x in dict_list if x is not None]
#
#
# return label_list, dict_list
# ==============
# Visualization
# ==============
# -------- Revenue and net income trend chart
# Revised 220220:
# 1) parameters reduced to stock_code
# 2) crawled data arrives as a list, so fixed indices replace kind
def mch_dg(stock_code):
gcode = stock_code
nm = stc_code_to_nm(stock_code)
bs_df = bs_craw(stock_code=gcode)[0]
label_list = bs_df.columns[1:6].tolist() # four quarters + year-ago quarter
mch_list = bs_df.loc[0, label_list].tolist() # revenue
dg_list = bs_df.loc[15, label_list].tolist() # net income
return label_list, mch_list, dg_list
def icon_selection(index_array):
res = []
for idx in index_array:
if idx > 3:
res.append("RAIN")
elif 1.2 < idx <= 3:
res.append("CLOUDY")
elif 0.8 < idx <= 1.2:
res.append("PARTLY_CLOUDY_DAY")
elif 0 < idx <= 0.8:
res.append("CLEAR_DAY")
else:
res.append("SNOW")
return res
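# Example: icon_selection maps each peer-relative ratio (1.0 = equal to the peer
# mean) onto a weather icon:
# icon_selection([0.5, 1.0, 2.0, 4.0, -1.0])
# -> ['CLEAR_DAY', 'PARTLY_CLOUDY_DAY', 'CLOUDY', 'RAIN', 'SNOW']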
def foreign_giguan(index_array):
res = []
for idx in index_array:
if idx > 0:
res.append("CLEAR_DAY")
elif idx == 0:
res.append("CLOUDY")
else:
res.append("RAIN")
return res
# ====================================================
# Data
# ====================================================
# -------- Load the merged company file
com_df = pd.read_csv('com_df.csv',
dtype={'stock_code': 'str', '표준코드': 'str', '단축코드': 'str', 'stock_code_ori': 'str'},
parse_dates=['listed_date', '상장일'])
# -------- News crawling
def news_crawl(gi):
tot_list = []
for p in range(1):
# page aggregating news articles (only the first page is fetched)
url = 'https://m.stock.naver.com/domestic/stock/' + str(gi) + '/news/title' # https://m.stock.naver.com/domestic/stock/003550/total
# endpoint found via the browser devtools (F12) network tab
#https://m.stock.naver.com/api/news/stock/005930?pageSize=20&page=1&searchMethod=title_entity_id.basic
url = "https://m.stock.naver.com/api/news/stock/"+str(gi)+"?pageSize=5&searchMethod=title_entity_id.basic&page=1"
res = requests.get(url)
news_list = json.loads(res.text)
# iterate over every article fetched from the page
#print(news_list[0])
for i, news in enumerate(news_list) :
# news outlet id
a=news['items'][0]['officeId']
# article id
b=news['items'][0]['articleId']
row = [] # renamed from `list` to avoid shadowing the built-in
row.append(news['items'][0]['officeName']) # outlet
row.append(news['items'][0]['datetime'][:8]) # date
row.append(news['items'][0]['title'].replace('&quot;', '"')) # title, HTML quotes unescaped
row.append(news['items'][0]['imageOriginLink']) # image
row.append(news['items'][0]['body'].replace('&quot;', '"')) # article body
row.append('https://m.stock.naver.com/domestic/stock/'+str(gi)+'/news/view/'+str(a)+'/'+str(b)) # article url (was hardcoded to 005930)
tot_list.append(row)
news_df = pd.DataFrame(data=tot_list, columns=['offname','rdate','title','imgsrc','content','url'])
news_df['title'] = news_df['title'].str.replace('&amp;', '&')
news_df['content'] = news_df['content'].str.replace('&amp;', '&')
#news_df['title'] = [re.sub('[^A-Za-z0-9가-힣]', '', s) for s in news_df['title']]
#news_df.to_csv('css.csv',index=False)
return news_df
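# Usage sketch; the mobile API endpoint and its JSON fields are as observed at the
# time of writing, not a stable contract:
# df = news_crawl('005930')
# print(df[['offname', 'rdate', 'title']].head())  # outlet, yyyymmdd date, headline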
# co: ticker code
def relate_code_crawl(co):
# Load the page that lists related ticker codes
url='https://finance.naver.com/item/main.naver?code='+str(co)
page=pd.read_html(url,encoding='CP949')
# Extract the related names and ticker codes (code_list[0] is the '종목명' name header, so it is dropped)
code_list=page[4].columns.tolist()
code_list=code_list[1:]
# Return the list of ticker codes
codes=[]
for word in (code_list):
codes.append(word[-6:])
#print(codes)
return codes
# def before_1w_kospi(date):
# before1w=date-timedelta(days=7)
# return fdr.DataReader('KS11',before1w)[['Close']]#, fdr.DataReader('KQ11',before1w)
def invest_opinion(gcode):
url='https://finance.naver.com/item/coinfo.naver?code='+str(gcode)
page=pd.read_html(url,encoding='CP949')
try:
a,b=page[3][1].tolist()[0][:4].split('.')
return ((int(a)+int(b)/100)/5)*100 # compute the opinion score, then convert back to a percentage
except ValueError:
return 0.1
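# Worked example of the opinion-score parsing, assuming the cell starts with a
# score such as '3.85':
# a, b = '3.85'[:4].split('.')
# ((int(a) + int(b) / 100) / 5) * 100  # -> 77.0, i.e. 3.85 out of 5 as a percentage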
# fnguide IFRS summary crawler
def crawl_ifrs(gcode):
url = "http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A"+gcode+"&cID=&MenuYn=Y&ReportGB=&NewMenuID=11&stkGb=701"
table_list = pd.read_html(url, encoding='UTF-8')
ifrs = table_list[10]
ifrs = ifrs.fillna('9999999999')
for i in range(1, 5):
if ifrs.iloc[:, i].dtype == 'O':
ifrs.iloc[:, i] = ifrs.iloc[:, i].apply(lambda x: '9999999999' if type(x) == str else x)
# print(ifrs.iloc[:, i]) # debug output
ifrs.iloc[:, i] = ifrs.iloc[:, i].astype('float')
ifrs.iloc[:, i] = ifrs.iloc[:, i].apply(lambda x: format(float(x), ','))
ifrs = pd.concat([ifrs.iloc[:, 0], ifrs['Annual']], axis=1)
ifrs = ifrs.astype(str)
for i in range(1, 5):
ifrs.iloc[:12, i] = ifrs.iloc[:12, i].apply(lambda x: x[:-2])
ifrs.iloc[18:21, i] = ifrs.iloc[18:21, i].apply(lambda x: x[:-2])
ifrs.iloc[23:24, i] = ifrs.iloc[23:24, i].apply(lambda x: x[:-2])
ifrs = ifrs.replace(['9,999,999,999', '9,999,999,999.0'], ['-', '-'])
ifrs.rename(columns={'IFRS(연결)': ''}, inplace=True)
ifrs = ifrs.to_html(justify="right", index=False, classes="table")
ifrs = ifrs.replace('border="1"', 'border="0"')
pd.options.display.float_format = '{:,.0f}'.format
ifrs = ifrs.replace('<td>', '<td align="right">')
ifrs = ifrs.replace('<th>', '<th style="text-align: right;">')
ifrs = ifrs.replace('halign="left"', 'style="text-align: center;"')
ifrs = ifrs.replace('class ="dataframe table"',
'class ="dataframe table" style = "table-layout:fixed;word-break:break-all;"')
return (ifrs)
def ori_code(yh_code):
origin_stock=com_df[com_df['yh_code']==yh_code]['stock_code_ori'].values[0]
return origin_stock
# ARIMA model
def stock_predict(code,ptype):
data = stock.get_market_ohlcv_by_date(fromdate="20220101", todate="20220222", ticker=str(code))
print(data.head())
data=data[[ptype]]
y_train=data
y_test=data
kpss_diffs = ndiffs(y_train, alpha=0.05, test='kpss', max_d=6)
adf_diffs = ndiffs(y_train, alpha=0.05, test='adf', max_d=6)
n_diffs = max(adf_diffs, kpss_diffs)
print(f"์ถ์ ๋ ์ฐจ์ d = {n_diffs}")
model=pm.auto_arima(y_train,d=n_diffs,seasonal=False,trace=True)
model.fit(y_train)
print(model.summary())
def forecast_one_step():
fc, conf_int = model.predict(n_periods=1, return_conf_int=True) # one step ahead, with confidence interval
return (
fc.tolist()[0],
np.asarray(conf_int).tolist()[0]
)
forecasts = []
y_pred = []
pred_upper = []
pred_lower = []
for new_ob in y_test[ptype]:
fc, conf = forecast_one_step()
y_pred.append(int(fc))
pred_upper.append(conf[1])
pred_lower.append(conf[0])
## update the model with the new observation !!
model.update(new_ob)
fc_last = model.predict(n_periods=1) # one step ahead
df=pd.DataFrame({"test": y_test[ptype], "pred": y_pred})
print(df.tail())
def MAE(y_test, y_pred):
# NOTE: despite the name this is MAPE, computed from the df built above
return np.mean(np.abs((df['test']-df['pred'])/df['test']))*100
mae=np.round(MAE(y_test, y_pred).astype('float'),4)
print(f"MAPE: {MAE(y_test, y_pred):.3f}")
price_list=[]
return int(fc_last),mae
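# Usage sketch of the rolling one-step ARIMA forecast (each observed close is fed
# back via model.update before the next prediction); the ticker is a placeholder
# and '종가' is the pykrx close-price column:
# next_close, mape = stock_predict('005930', '종가')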
|
for x in arr_l
|
store.rs
|
use graph_chain_ethereum::{Mapping, MappingABI};
use graph_mock::MockMetricsRegistry;
use hex_literal::hex;
use lazy_static::lazy_static;
use std::time::Duration;
use std::{collections::HashSet, sync::Mutex};
use std::{marker::PhantomData, str::FromStr};
use test_store::*;
use graph::components::store::{DeploymentLocator, WritableStore};
use graph::data::subgraph::*;
use graph::prelude::*;
use graph::{
blockchain::DataSource,
components::store::{
BlockStore as _, EntityFilter, EntityKey, EntityOrder, EntityQuery, EntityType,
StatusStore, SubscriptionManager as _,
},
prelude::ethabi::Contract,
};
use graph::{data::store::scalar, semver::Version};
use graph_store_postgres::layout_for_tests::STRING_PREFIX_SIZE;
use graph_store_postgres::{Store as DieselStore, SubgraphStore as DieselSubgraphStore};
use web3::types::{Address, H256};
const USER_GQL: &str = "
interface ColorAndAge {
id: ID!,
age: Int,
favorite_color: String
}
type User implements ColorAndAge @entity {
id: ID!,
name: String,
bin_name: Bytes,
email: String,
age: Int,
seconds_age: BigInt,
weight: BigDecimal,
coffee: Boolean,
favorite_color: String
}
type Person implements ColorAndAge @entity {
id: ID!,
name: String,
age: Int,
favorite_color: String
}
type Manual @entity {
id: ID!,
text: String
}
";
const USER: &str = "User";
lazy_static! {
static ref TEST_SUBGRAPH_ID_STRING: String = String::from("testsubgraph");
static ref TEST_SUBGRAPH_ID: DeploymentHash =
DeploymentHash::new(TEST_SUBGRAPH_ID_STRING.as_str()).unwrap();
static ref TEST_SUBGRAPH_SCHEMA: Schema =
Schema::parse(USER_GQL, TEST_SUBGRAPH_ID.clone()).expect("Failed to parse user schema");
static ref TEST_BLOCK_0_PTR: BlockPtr = (
H256::from(hex!(
"bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f"
)),
0u64
)
.into();
static ref TEST_BLOCK_1_PTR: BlockPtr = (
H256::from(hex!(
"8511fa04b64657581e3f00e14543c1d522d5d7e771b54aa3060b662ade47da13"
)),
1u64
)
.into();
static ref TEST_BLOCK_2_PTR: BlockPtr = (
H256::from(hex!(
"b98fb783b49de5652097a989414c767824dff7e7fd765a63b493772511db81c1"
)),
2u64
)
.into();
static ref TEST_BLOCK_3_PTR: BlockPtr = (
H256::from(hex!(
"977c084229c72a0fa377cae304eda9099b6a2cb5d83b25cdf0f0969b69874255"
)),
3u64
)
.into();
static ref TEST_BLOCK_3A_PTR: BlockPtr = (
H256::from(hex!(
"d163aec0592c7cb00c2700ab65dcaac93289f5d250b3b889b39198b07e1fbe4a"
)),
3u64
)
.into();
static ref TEST_BLOCK_4_PTR: BlockPtr = (
H256::from(hex!(
"007a03cdf635ebb66f5e79ae66cc90ca23d98031665649db056ff9c6aac2d74d"
)),
4u64
)
.into();
static ref TEST_BLOCK_4A_PTR: BlockPtr = (
H256::from(hex!(
"8fab27e9e9285b0a39110f4d9877f05d0f43d2effa157e55f4dcc49c3cf8cbd7"
)),
4u64
)
.into();
static ref TEST_BLOCK_5_PTR: BlockPtr = (
H256::from(hex!(
"e8b3b02b936c4a4a331ac691ac9a86e197fb7731f14e3108602c87d4dac55160"
)),
5u64
)
.into();
}
/// Test harness for running database integration tests.
fn run_test<R, F>(test: F)
where
F: FnOnce(Arc<DieselStore>, Arc<dyn WritableStore>, DeploymentLocator) -> R + Send + 'static,
R: std::future::Future<Output = ()> + Send + 'static,
{
run_test_sequentially(|store| async move {
let subgraph_store = store.subgraph_store();
// Reset state before starting
remove_test_data(subgraph_store.clone());
// Seed database with test data
let deployment = insert_test_data(subgraph_store.clone());
let writable = store
.subgraph_store()
.writable(LOGGER.clone(), deployment.id)
.await
.expect("we can get a writable store");
// Run test
test(store, writable, deployment).await
});
}
/// Inserts test data into the store.
///
/// Inserts data in test blocks `GENESIS_PTR`, `TEST_BLOCK_1_PTR`, and
/// `TEST_BLOCK_2_PTR`
fn insert_test_data(store: Arc<DieselSubgraphStore>) -> DeploymentLocator {
let manifest = SubgraphManifest::<graph_chain_ethereum::Chain> {
id: TEST_SUBGRAPH_ID.clone(),
spec_version: Version::new(1, 0, 0),
features: Default::default(),
description: None,
repository: None,
schema: TEST_SUBGRAPH_SCHEMA.clone(),
data_sources: vec![],
graft: None,
templates: vec![],
chain: PhantomData,
};
// Create SubgraphDeploymentEntity
let deployment = SubgraphDeploymentEntity::new(&manifest, false, None);
let name = SubgraphName::new("test/store").unwrap();
let node_id = NodeId::new("test").unwrap();
let deployment = store
.create_subgraph_deployment(
name,
&TEST_SUBGRAPH_SCHEMA,
deployment,
node_id,
NETWORK_NAME.to_string(),
SubgraphVersionSwitchingMode::Instant,
)
.unwrap();
let test_entity_1 = create_test_entity(
"1",
USER,
"Johnton",
"[email protected]",
67 as i32,
184.4,
false,
None,
);
transact_entity_operations(
&store,
&deployment,
GENESIS_PTR.clone(),
vec![test_entity_1],
)
.unwrap();
let test_entity_2 = create_test_entity(
"2",
USER,
"Cindini",
"[email protected]",
43 as i32,
159.1,
true,
Some("red"),
);
let test_entity_3_1 = create_test_entity(
"3",
USER,
"Shaqueeena",
"[email protected]",
28 as i32,
111.7,
false,
Some("blue"),
);
transact_entity_operations(
&store,
&deployment,
TEST_BLOCK_1_PTR.clone(),
vec![test_entity_2, test_entity_3_1],
)
.unwrap();
let test_entity_3_2 = create_test_entity(
"3",
USER,
"Shaqueeena",
"[email protected]",
28 as i32,
111.7,
false,
None,
);
transact_entity_operations(
&store,
&deployment,
TEST_BLOCK_2_PTR.clone(),
vec![test_entity_3_2],
)
.unwrap();
deployment
}
/// Creates a test entity.
fn create_test_entity(
id: &str,
entity_type: &str,
name: &str,
email: &str,
age: i32,
weight: f64,
coffee: bool,
favorite_color: Option<&str>,
) -> EntityOperation
|
/// Removes test data from the database behind the store.
fn remove_test_data(store: Arc<DieselSubgraphStore>) {
store
.delete_all_entities_for_test_use_only()
.expect("deleting test entities succeeds");
}
fn get_entity_count(store: Arc<DieselStore>, subgraph_id: &DeploymentHash) -> u64 {
let info = store
.status(status::Filter::Deployments(vec![subgraph_id.to_string()]))
.unwrap();
let info = info.first().unwrap();
info.entity_count
}
#[test]
fn delete_entity() {
run_test(|store, writable, deployment| async move {
let entity_key = EntityKey::data(deployment.hash.clone(), USER.to_owned(), "3".to_owned());
// Check that there is an entity to remove.
writable.get(&entity_key).unwrap().unwrap();
let count = get_entity_count(store.clone(), &deployment.hash);
transact_entity_operations(
&store.subgraph_store(),
&deployment,
TEST_BLOCK_3_PTR.clone(),
vec![EntityOperation::Remove {
key: entity_key.clone(),
}],
)
.unwrap();
assert_eq!(
count,
get_entity_count(store.clone(), &deployment.hash) + 1
);
// Check that the deleted entity id is not present
assert!(writable.get(&entity_key).unwrap().is_none());
})
}
/// Check that user 1 was inserted correctly
#[test]
fn get_entity_1() {
run_test(|_, writable, deployment| async move {
let key = EntityKey::data(deployment.hash.clone(), USER.to_owned(), "1".to_owned());
let result = writable.get(&key).unwrap();
let mut expected_entity = Entity::new();
expected_entity.insert("__typename".to_owned(), USER.into());
expected_entity.insert("id".to_owned(), "1".into());
expected_entity.insert("name".to_owned(), "Johnton".into());
expected_entity.insert(
"bin_name".to_owned(),
Value::Bytes("Johnton".as_bytes().into()),
);
expected_entity.insert("email".to_owned(), "[email protected]".into());
expected_entity.insert("age".to_owned(), Value::Int(67 as i32));
expected_entity.insert(
"seconds_age".to_owned(),
Value::BigInt(BigInt::from(2114359200)),
);
expected_entity.insert("weight".to_owned(), Value::BigDecimal(184.4.into()));
expected_entity.insert("coffee".to_owned(), Value::Bool(false));
// "favorite_color" was set to `Null` earlier and should be absent
// Check that the expected entity was returned
assert_eq!(result, Some(expected_entity));
})
}
/// Check that user 3 was updated correctly
#[test]
fn get_entity_3() {
run_test(|_, writable, deployment| async move {
let key = EntityKey::data(deployment.hash.clone(), USER.to_owned(), "3".to_owned());
let result = writable.get(&key).unwrap();
let mut expected_entity = Entity::new();
expected_entity.insert("__typename".to_owned(), USER.into());
expected_entity.insert("id".to_owned(), "3".into());
expected_entity.insert("name".to_owned(), "Shaqueeena".into());
expected_entity.insert(
"bin_name".to_owned(),
Value::Bytes("Shaqueeena".as_bytes().into()),
);
expected_entity.insert("email".to_owned(), "[email protected]".into());
expected_entity.insert("age".to_owned(), Value::Int(28 as i32));
expected_entity.insert(
"seconds_age".to_owned(),
Value::BigInt(BigInt::from(883612800)),
);
expected_entity.insert("weight".to_owned(), Value::BigDecimal(111.7.into()));
expected_entity.insert("coffee".to_owned(), Value::Bool(false));
// "favorite_color" was set to `Null` earlier and should be absent
// Check that the expected entity was returned
assert_eq!(result, Some(expected_entity));
})
}
#[test]
fn insert_entity() {
run_test(|store, writable, deployment| async move {
let entity_key = EntityKey::data(deployment.hash.clone(), USER.to_owned(), "7".to_owned());
let test_entity = create_test_entity(
"7",
USER,
"Wanjon",
"[email protected]",
76 as i32,
111.7,
true,
Some("green"),
);
let count = get_entity_count(store.clone(), &deployment.hash);
transact_entity_operations(
&store.subgraph_store(),
&deployment,
TEST_BLOCK_3_PTR.clone(),
vec![test_entity],
)
.unwrap();
assert_eq!(count + 1, get_entity_count(store.clone(), &deployment.hash));
// Check that new record is in the store
writable.get(&entity_key).unwrap().unwrap();
})
}
#[test]
fn update_existing() {
run_test(|store, writable, deployment| async move {
let entity_key = EntityKey::data(deployment.hash.clone(), USER.to_owned(), "1".to_owned());
let op = create_test_entity(
"1",
USER,
"Wanjon",
"[email protected]",
76 as i32,
111.7,
true,
Some("green"),
);
let mut new_data = match op {
EntityOperation::Set { ref data, .. } => data.clone(),
_ => unreachable!(),
};
// Verify that the entity before updating is different from what we expect afterwards
assert_ne!(writable.get(&entity_key).unwrap().unwrap(), new_data);
// Set test entity; as the entity already exists an update should be performed
let count = get_entity_count(store.clone(), &deployment.hash);
transact_entity_operations(
&store.subgraph_store(),
&deployment,
TEST_BLOCK_3_PTR.clone(),
vec![op],
)
.unwrap();
assert_eq!(count, get_entity_count(store.clone(), &deployment.hash));
// Verify that the entity in the store has changed to what we have set.
let bin_name = match new_data.get("bin_name") {
Some(Value::Bytes(bytes)) => bytes.clone(),
_ => unreachable!(),
};
new_data.insert("__typename".to_owned(), USER.into());
new_data.insert("bin_name".to_owned(), Value::Bytes(bin_name));
assert_eq!(writable.get(&entity_key).unwrap(), Some(new_data));
})
}
#[test]
fn partially_update_existing() {
run_test(|store, writable, deployment| async move {
let entity_key = EntityKey::data(deployment.hash.clone(), USER.to_owned(), "1".to_owned());
let partial_entity = Entity::from(vec![
("id", Value::from("1")),
("name", Value::from("Johnny Boy")),
("email", Value::Null),
]);
let original_entity = writable
.get(&entity_key)
.unwrap()
.expect("entity not found");
// Set test entity; as the entity already exists an update should be performed
transact_entity_operations(
&store.subgraph_store(),
&deployment,
TEST_BLOCK_3_PTR.clone(),
vec![EntityOperation::Set {
key: entity_key.clone(),
data: partial_entity.clone(),
}],
)
.unwrap();
// Obtain the updated entity from the store
let updated_entity = writable
.get(&entity_key)
.unwrap()
.expect("entity not found");
// Verify that the values of all attributes we have set were either unset
// (in the case of Value::Null) or updated to the new values
assert_eq!(updated_entity.get("id"), partial_entity.get("id"));
assert_eq!(updated_entity.get("name"), partial_entity.get("name"));
assert_eq!(updated_entity.get("email"), None);
// Verify that all attributes we have not set have remained at their old values
assert_eq!(updated_entity.get("age"), original_entity.get("age"));
assert_eq!(updated_entity.get("weight"), original_entity.get("weight"));
assert_eq!(updated_entity.get("coffee"), original_entity.get("coffee"));
})
}
struct QueryChecker {
store: Arc<DieselStore>,
}
impl QueryChecker {
fn new(store: Arc<DieselStore>) -> Self {
Self { store }
}
fn check(self, expected_entity_ids: Vec<&str>, query: EntityQuery) -> Self {
let expected_entity_ids: Vec<String> =
expected_entity_ids.into_iter().map(str::to_owned).collect();
let entities = self
.store
.subgraph_store()
.find(query)
.expect("store.find failed to execute query");
let entity_ids: Vec<_> = entities
.into_iter()
.map(|entity| match entity.get("id") {
Some(Value::String(id)) => id.to_owned(),
Some(_) => panic!("store.find returned entity with non-string ID attribute"),
None => panic!("store.find returned entity with no ID attribute"),
})
.collect();
assert_eq!(entity_ids, expected_entity_ids);
self
}
}
fn user_query() -> EntityQuery {
EntityQuery::new(
TEST_SUBGRAPH_ID.clone(),
BLOCK_NUMBER_MAX,
EntityCollection::All(vec![(EntityType::from(USER), AttributeNames::All)]),
)
}
trait EasyOrder {
fn asc(self, attr: &str) -> Self;
fn desc(self, attr: &str) -> Self;
}
impl EasyOrder for EntityQuery {
fn asc(self, attr: &str) -> Self {
// The ValueType doesn't matter since relational layouts ignore it
self.order(EntityOrder::Ascending(attr.to_owned(), ValueType::String))
}
fn desc(self, attr: &str) -> Self {
// The ValueType doesn't matter since relational layouts ignore it
self.order(EntityOrder::Descending(attr.to_owned(), ValueType::String))
}
}
#[test]
fn find() {
run_test(|store, _, _| async move {
// Filter tests with string attributes
QueryChecker::new(store.clone())
.check(
vec!["2"],
user_query().filter(EntityFilter::Contains("name".into(), "ind".into())),
)
.check(
vec!["2"],
user_query().filter(EntityFilter::Equal("name".to_owned(), "Cindini".into())),
)
.check(
vec!["1", "3"],
user_query()
.filter(EntityFilter::Not("name".to_owned(), "Cindini".into()))
.asc("name"),
)
.check(
vec!["3"],
user_query().filter(EntityFilter::GreaterThan("name".to_owned(), "Kundi".into())),
)
.check(
vec!["2", "1"],
user_query()
.filter(EntityFilter::LessThan("name".to_owned(), "Kundi".into()))
.asc("name"),
)
.check(
vec!["1", "2"],
user_query()
.filter(EntityFilter::LessThan("name".to_owned(), "Kundi".into()))
.desc("name"),
)
.check(
vec!["1"],
user_query()
.filter(EntityFilter::LessThan("name".to_owned(), "ZZZ".into()))
.desc("name")
.first(1)
.skip(1),
)
.check(
vec!["2"],
user_query()
.filter(EntityFilter::And(vec![
EntityFilter::LessThan("name".to_owned(), "Cz".into()),
EntityFilter::Equal("name".to_owned(), "Cindini".into()),
]))
.desc("name"),
)
.check(
vec!["2"],
user_query()
.filter(EntityFilter::EndsWith("name".to_owned(), "ini".into()))
.desc("name"),
)
.check(
vec!["3", "1"],
user_query()
.filter(EntityFilter::NotEndsWith("name".to_owned(), "ini".into()))
.desc("name"),
)
.check(
vec!["1"],
user_query()
.filter(EntityFilter::In("name".to_owned(), vec!["Johnton".into()]))
.desc("name"),
)
.check(
vec!["1", "2"],
user_query()
.filter(EntityFilter::NotIn(
"name".to_owned(),
vec!["Shaqueeena".into()],
))
.desc("name"),
);
// Filter tests with float attributes
QueryChecker::new(store.clone())
.check(
vec!["1"],
user_query().filter(EntityFilter::Equal(
"weight".to_owned(),
Value::BigDecimal(184.4.into()),
)),
)
.check(
vec!["3", "2"],
user_query()
.filter(EntityFilter::Not(
"weight".to_owned(),
Value::BigDecimal(184.4.into()),
))
.desc("name"),
)
.check(
vec!["1"],
user_query().filter(EntityFilter::GreaterThan(
"weight".to_owned(),
Value::BigDecimal(160.0.into()),
)),
)
.check(
vec!["2", "3"],
user_query()
.filter(EntityFilter::LessThan(
"weight".to_owned(),
Value::BigDecimal(160.0.into()),
))
.asc("name"),
)
.check(
vec!["3", "2"],
user_query()
.filter(EntityFilter::LessThan(
"weight".to_owned(),
Value::BigDecimal(160.0.into()),
))
.desc("name"),
)
.check(
vec!["2"],
user_query()
.filter(EntityFilter::LessThan(
"weight".to_owned(),
Value::BigDecimal(161.0.into()),
))
.desc("name")
.first(1)
.skip(1),
)
.check(
vec!["3", "1"],
user_query()
.filter(EntityFilter::In(
"weight".to_owned(),
vec![
Value::BigDecimal(184.4.into()),
Value::BigDecimal(111.7.into()),
],
))
.desc("name")
.first(5),
)
.check(
vec!["2"],
user_query()
.filter(EntityFilter::NotIn(
"weight".to_owned(),
vec![
Value::BigDecimal(184.4.into()),
Value::BigDecimal(111.7.into()),
],
))
.desc("name")
.first(5),
);
// Filter tests with int attributes
QueryChecker::new(store.clone())
.check(
vec!["1"],
user_query()
.filter(EntityFilter::Equal("age".to_owned(), Value::Int(67 as i32)))
.desc("name"),
)
.check(
vec!["3", "2"],
user_query()
.filter(EntityFilter::Not("age".to_owned(), Value::Int(67 as i32)))
.desc("name"),
)
.check(
vec!["1"],
user_query().filter(EntityFilter::GreaterThan(
"age".to_owned(),
Value::Int(43 as i32),
)),
)
.check(
vec!["2", "1"],
user_query()
.filter(EntityFilter::GreaterOrEqual(
"age".to_owned(),
Value::Int(43 as i32),
))
.asc("name"),
)
.check(
vec!["2", "3"],
user_query()
.filter(EntityFilter::LessThan(
"age".to_owned(),
Value::Int(50 as i32),
))
.asc("name"),
)
.check(
vec!["2", "3"],
user_query()
.filter(EntityFilter::LessOrEqual(
"age".to_owned(),
Value::Int(43 as i32),
))
.asc("name"),
)
.check(
vec!["3", "2"],
user_query()
.filter(EntityFilter::LessThan(
"age".to_owned(),
Value::Int(50 as i32),
))
.desc("name"),
)
.check(
vec!["2"],
user_query()
.filter(EntityFilter::LessThan(
"age".to_owned(),
Value::Int(67 as i32),
))
.desc("name")
.first(1)
.skip(1),
)
.check(
vec!["1", "2"],
user_query()
.filter(EntityFilter::In(
"age".to_owned(),
vec![Value::Int(67 as i32), Value::Int(43 as i32)],
))
.desc("name")
.first(5),
)
.check(
vec!["3"],
user_query()
.filter(EntityFilter::NotIn(
"age".to_owned(),
vec![Value::Int(67 as i32), Value::Int(43 as i32)],
))
.desc("name")
.first(5),
);
// Filter tests with bool attributes
QueryChecker::new(store.clone())
.check(
vec!["2"],
user_query()
.filter(EntityFilter::Equal("coffee".to_owned(), Value::Bool(true)))
.desc("name"),
)
.check(
vec!["1", "3"],
user_query()
.filter(EntityFilter::Not("coffee".to_owned(), Value::Bool(true)))
.asc("name"),
)
.check(
vec!["2"],
user_query()
.filter(EntityFilter::In(
"coffee".to_owned(),
vec![Value::Bool(true)],
))
.desc("name")
.first(5),
)
.check(
vec!["3", "1"],
user_query()
.filter(EntityFilter::NotIn(
"coffee".to_owned(),
vec![Value::Bool(true)],
))
.desc("name")
.first(5),
);
// Misc filter tests
QueryChecker::new(store)
.check(
vec!["1"],
user_query()
.filter(EntityFilter::Equal(
"bin_name".to_owned(),
Value::Bytes("Johnton".as_bytes().into()),
))
.desc("name"),
)
.check(
vec!["3", "1"],
user_query()
.filter(EntityFilter::Equal(
"favorite_color".to_owned(),
Value::Null,
))
.desc("name"),
)
.check(
vec!["3", "1"],
user_query()
.filter(EntityFilter::Equal(
"favorite_color".to_owned(),
Value::Null,
))
.desc("name"),
)
.check(
vec!["2"],
user_query()
.filter(EntityFilter::Not("favorite_color".to_owned(), Value::Null))
.desc("name"),
)
.check(
vec!["2"],
user_query()
.filter(EntityFilter::NotIn(
"favorite_color".to_owned(),
vec![Value::Null],
))
.desc("name"),
)
.check(vec!["3", "2", "1"], user_query().asc("weight"))
.check(vec!["1", "2", "3"], user_query().desc("weight"))
.check(vec!["1", "2", "3"], user_query().asc("id"))
.check(vec!["3", "2", "1"], user_query().desc("id"))
.check(vec!["3", "2", "1"], user_query().asc("age"))
.check(vec!["1", "2", "3"], user_query().desc("age"))
.check(vec!["2", "1", "3"], user_query().asc("name"))
.check(vec!["3", "1", "2"], user_query().desc("name"))
.check(
vec!["1", "2"],
user_query()
.filter(EntityFilter::And(vec![EntityFilter::Or(vec![
EntityFilter::Equal("id".to_owned(), Value::from("1")),
EntityFilter::Equal("id".to_owned(), Value::from("2")),
])]))
.asc("id"),
);
});
}
fn make_entity_change(entity_type: &str) -> EntityChange {
EntityChange::Data {
subgraph_id: TEST_SUBGRAPH_ID.clone(),
entity_type: EntityType::new(entity_type.to_owned()),
}
}
// Gather events until we've seen all the expected events or we time out waiting
async fn check_events(
stream: StoreEventStream<impl Stream<Item = Arc<StoreEvent>, Error = ()> + Send>,
expected: Vec<StoreEvent>,
) {
fn as_set(events: Vec<Arc<StoreEvent>>) -> HashSet<EntityChange> {
events.into_iter().fold(HashSet::new(), |mut set, event| {
set.extend(event.changes.iter().map(|change| change.clone()));
set
})
}
let expected = Mutex::new(as_set(
expected.into_iter().map(|event| Arc::new(event)).collect(),
));
// Capture extra changes here; this is only needed for debugging, really.
// It's permissible that we get more changes than we expected because of
// how store events group changes together
let extra: Mutex<HashSet<EntityChange>> = Mutex::new(HashSet::new());
// Get events from the store until we've either seen all the changes we
// expected or we timed out waiting for them
stream
.take_while(|event| {
let mut expected = expected.lock().unwrap();
for change in &event.changes {
if !expected.remove(&change) {
extra.lock().unwrap().insert(change.clone());
}
}
future::ok(!expected.is_empty())
})
.collect()
.compat()
.timeout(Duration::from_secs(3))
.await
.expect(&format!(
"timed out waiting for events\n still waiting for {:?}\n got extra events {:?}",
expected.lock().unwrap().clone(),
extra.lock().unwrap().clone()
))
.expect("something went wrong getting events");
// Check again that we really got everything
assert_eq!(HashSet::new(), expected.lock().unwrap().clone());
}
// Subscribe to store events
fn subscribe(
subgraph: &DeploymentHash,
entity_type: &str,
) -> StoreEventStream<impl Stream<Item = Arc<StoreEvent>, Error = ()> + Send> {
let subscription = SUBSCRIPTION_MANAGER.subscribe(vec![SubscriptionFilter::Entities(
subgraph.clone(),
EntityType::new(entity_type.to_owned()),
)]);
StoreEventStream::new(subscription)
}
async fn check_basic_revert(
store: Arc<DieselStore>,
expected: StoreEvent,
deployment: &DeploymentLocator,
entity_type: &str,
) {
let this_query = user_query()
.filter(EntityFilter::Equal(
"name".to_owned(),
Value::String("Shaqueeena".to_owned()),
))
.desc("name");
let subscription = subscribe(&deployment.hash, entity_type);
let state = deployment_state(store.as_ref(), &deployment.hash).await;
assert_eq!(&deployment.hash, &state.id);
// Revert block 3
revert_block(&store, &deployment, &*TEST_BLOCK_1_PTR).await;
let returned_entities = store
.subgraph_store()
.find(this_query.clone())
.expect("store.find operation failed");
// There should be 1 user returned in results
assert_eq!(1, returned_entities.len());
// Check if the first user in the result vector has email "[email protected]"
let returned_name = returned_entities[0].get(&"email".to_owned());
let test_value = Value::String("[email protected]".to_owned());
assert!(returned_name.is_some());
assert_eq!(&test_value, returned_name.unwrap());
let state = deployment_state(store.as_ref(), &deployment.hash).await;
assert_eq!(&deployment.hash, &state.id);
check_events(subscription, vec![expected]).await
}
#[test]
fn revert_block_basic_user() {
run_test(|store, _, deployment| async move {
let expected = StoreEvent::new(vec![make_entity_change(USER)]);
let count = get_entity_count(store.clone(), &deployment.hash);
check_basic_revert(store.clone(), expected, &deployment, USER).await;
assert_eq!(count, get_entity_count(store.clone(), &deployment.hash));
})
}
#[test]
fn revert_block_with_delete() {
run_test(|store, _, deployment| async move {
let this_query = user_query()
.filter(EntityFilter::Equal(
"name".to_owned(),
Value::String("Cindini".to_owned()),
))
.desc("name");
// Delete entity with id=2
let del_key = EntityKey::data(deployment.hash.clone(), USER.to_owned(), "2".to_owned());
// Process deletion
transact_entity_operations(
&store.subgraph_store(),
&deployment,
TEST_BLOCK_3_PTR.clone(),
vec![EntityOperation::Remove { key: del_key }],
)
.unwrap();
let subscription = subscribe(&deployment.hash, USER);
// Revert deletion
let count = get_entity_count(store.clone(), &deployment.hash);
revert_block(&store, &deployment, &*TEST_BLOCK_2_PTR).await;
assert_eq!(count + 1, get_entity_count(store.clone(), &deployment.hash));
// Query after revert
let returned_entities = store
.subgraph_store()
.find(this_query.clone())
.expect("store.find operation failed");
// There should be 1 entity returned in results
assert_eq!(1, returned_entities.len());
// Check if "[email protected]" is in result set
let returned_name = returned_entities[0].get(&"email".to_owned());
let test_value = Value::String("[email protected]".to_owned());
assert!(returned_name.is_some());
assert_eq!(&test_value, returned_name.unwrap());
// Check that the subscription notified us of the changes
let expected = StoreEvent::new(vec![make_entity_change(USER)]);
// The last event is the one for the reversion
check_events(subscription, vec![expected]).await
})
}
#[test]
fn revert_block_with_partial_update() {
run_test(|store, writable, deployment| async move {
let entity_key = EntityKey::data(deployment.hash.clone(), USER.to_owned(), "1".to_owned());
let partial_entity = Entity::from(vec![
("id", Value::from("1")),
("name", Value::from("Johnny Boy")),
("email", Value::Null),
]);
let original_entity = writable.get(&entity_key).unwrap().expect("missing entity");
// Set test entity; as the entity already exists an update should be performed
transact_entity_operations(
&store.subgraph_store(),
&deployment,
TEST_BLOCK_3_PTR.clone(),
vec![EntityOperation::Set {
key: entity_key.clone(),
data: partial_entity.clone(),
}],
)
.unwrap();
let subscription = subscribe(&deployment.hash, USER);
// Perform revert operation, reversing the partial update
let count = get_entity_count(store.clone(), &deployment.hash);
revert_block(&store, &deployment, &*TEST_BLOCK_2_PTR).await;
assert_eq!(count, get_entity_count(store.clone(), &deployment.hash));
// Obtain the reverted entity from the store
let reverted_entity = writable.get(&entity_key).unwrap().expect("missing entity");
// Verify that the entity has been returned to its original state
assert_eq!(reverted_entity, original_entity);
// Check that the subscription notified us of the changes
let expected = StoreEvent::new(vec![make_entity_change(USER)]);
check_events(subscription, vec![expected]).await
})
}
fn mock_data_source() -> graph_chain_ethereum::DataSource {
graph_chain_ethereum::DataSource {
kind: String::from("ethereum/contract"),
name: String::from("example data source"),
network: Some(String::from("mainnet")),
source: Source {
address: Some(Address::from_str("0123123123012312312301231231230123123123").unwrap()),
abi: String::from("123123"),
start_block: 0,
},
mapping: Mapping {
kind: String::from("ethereum/events"),
api_version: Version::parse("0.1.0").unwrap(),
language: String::from("wasm/assemblyscript"),
entities: vec![],
abis: vec![],
event_handlers: vec![],
call_handlers: vec![],
block_handlers: vec![],
link: Link {
link: "link".to_owned(),
},
runtime: Arc::new(Vec::new()),
},
context: Default::default(),
creation_block: None,
contract_abi: Arc::new(mock_abi()),
}
}
fn mock_abi() -> MappingABI {
MappingABI {
name: "mock_abi".to_string(),
contract: Contract::load(
r#"[
{
"inputs": [
{
"name": "a",
"type": "address"
}
],
"type": "constructor"
}
]"#
.as_bytes(),
)
.unwrap(),
}
}
#[test]
fn revert_block_with_dynamic_data_source_operations() {
run_test(|store, writable, deployment| async move {
let subgraph_store = store.subgraph_store();
// Create operations to add a user
let user_key = EntityKey::data(deployment.hash.clone(), USER.to_owned(), "1".to_owned());
let partial_entity = Entity::from(vec![
("id", Value::from("1")),
("name", Value::from("Johnny Boy")),
("email", Value::Null),
]);
// Get the original user for comparisons
let original_user = writable.get(&user_key).unwrap().expect("missing entity");
// Create operations to add a dynamic data source
let data_source = mock_data_source();
let ops = vec![EntityOperation::Set {
key: user_key.clone(),
data: partial_entity.clone(),
}];
// Add user and dynamic data source to the store
transact_entities_and_dynamic_data_sources(
&subgraph_store,
deployment.clone(),
TEST_BLOCK_3_PTR.clone(),
vec![data_source.as_stored_dynamic_data_source()],
ops,
)
.unwrap();
// Verify that the user is no longer the original
assert_ne!(
writable.get(&user_key).unwrap().expect("missing entity"),
original_user
);
// Verify that the dynamic data source exists afterwards
let loaded_dds = writable.load_dynamic_data_sources().await.unwrap();
assert_eq!(1, loaded_dds.len());
assert_eq!(data_source.source, loaded_dds[0].source);
let subscription = subscribe(&deployment.hash, USER);
// Revert block that added the user and the dynamic data source
revert_block(&store, &deployment, &*TEST_BLOCK_2_PTR).await;
// Verify that the user is the original again
assert_eq!(
writable.get(&user_key).unwrap().expect("missing entity"),
original_user
);
// Verify that the dynamic data source is gone after the reversion
let loaded_dds = writable.load_dynamic_data_sources().await.unwrap();
assert_eq!(0, loaded_dds.len());
// Verify that the right change events were emitted for the reversion
let expected_events = vec![StoreEvent {
tag: 3,
changes: HashSet::from_iter(
vec![EntityChange::Data {
subgraph_id: DeploymentHash::new("testsubgraph").unwrap(),
entity_type: EntityType::new(USER.into()),
}]
.into_iter(),
),
}];
check_events(subscription, expected_events).await
})
}
#[test]
fn entity_changes_are_fired_and_forwarded_to_subscriptions() {
run_test(|store, _, _| async move {
let subgraph_id = DeploymentHash::new("EntityChangeTestSubgraph").unwrap();
let schema =
Schema::parse(USER_GQL, subgraph_id.clone()).expect("Failed to parse user schema");
let manifest = SubgraphManifest::<graph_chain_ethereum::Chain> {
id: subgraph_id.clone(),
spec_version: Version::new(1, 0, 0),
features: Default::default(),
description: None,
repository: None,
schema: schema.clone(),
data_sources: vec![],
graft: None,
templates: vec![],
chain: PhantomData,
};
// Create SubgraphDeploymentEntity
let deployment_entity =
SubgraphDeploymentEntity::new(&manifest, false, Some(TEST_BLOCK_0_PTR.clone()));
let name = SubgraphName::new("test/entity-changes-are-fired").unwrap();
let node_id = NodeId::new("test").unwrap();
let deployment = store
.subgraph_store()
.create_subgraph_deployment(
name,
&schema,
deployment_entity,
node_id,
NETWORK_NAME.to_string(),
SubgraphVersionSwitchingMode::Instant,
)
.unwrap();
let subscription = subscribe(&subgraph_id, USER);
// Add two entities to the store
let added_entities = vec![
(
"1".to_owned(),
Entity::from(vec![
("id", Value::from("1")),
("name", Value::from("Johnny Boy")),
]),
),
(
"2".to_owned(),
Entity::from(vec![
("id", Value::from("2")),
("name", Value::from("Tessa")),
]),
),
];
transact_entity_operations(
&store.subgraph_store(),
&deployment,
TEST_BLOCK_1_PTR.clone(),
added_entities
.iter()
.map(|(id, data)| EntityOperation::Set {
key: EntityKey::data(subgraph_id.clone(), USER.to_owned(), id.to_owned()),
data: data.to_owned(),
})
.collect(),
)
.unwrap();
// Update an entity in the store
let updated_entity = Entity::from(vec![
("id", Value::from("1")),
("name", Value::from("Johnny")),
]);
let update_op = EntityOperation::Set {
key: EntityKey::data(subgraph_id.clone(), USER.to_owned(), "1".to_owned()),
data: updated_entity.clone(),
};
// Delete an entity in the store
let delete_op = EntityOperation::Remove {
key: EntityKey::data(subgraph_id.clone(), USER.to_owned(), "2".to_owned()),
};
// Commit update & delete ops
transact_entity_operations(
&store.subgraph_store(),
&deployment,
TEST_BLOCK_2_PTR.clone(),
vec![update_op, delete_op],
)
.unwrap();
// We're expecting two events to be written to the subscription stream
let user_type = EntityType::new(USER.to_owned());
let expected = vec![
StoreEvent::new(vec![
EntityChange::Data {
subgraph_id: subgraph_id.clone(),
entity_type: user_type.clone(),
},
EntityChange::Data {
subgraph_id: subgraph_id.clone(),
entity_type: user_type.clone(),
},
]),
StoreEvent::new(vec![
EntityChange::Data {
subgraph_id: subgraph_id.clone(),
entity_type: user_type.clone(),
},
EntityChange::Data {
subgraph_id: subgraph_id.clone(),
entity_type: user_type.clone(),
},
]),
];
check_events(subscription, expected).await
})
}
#[test]
fn throttle_subscription_delivers() {
run_test(|store, _, deployment| async move {
let subscription = subscribe(&deployment.hash, USER)
.throttle_while_syncing(
&*LOGGER,
store
.clone()
.query_store(deployment.hash.clone().into(), true)
.await
.unwrap(),
Duration::from_millis(500),
)
.await;
let user4 = create_test_entity(
"4",
USER,
"Steve",
"[email protected]",
72 as i32,
120.7,
false,
None,
);
transact_entity_operations(
&store.subgraph_store(),
&deployment,
TEST_BLOCK_3_PTR.clone(),
vec![user4],
)
.unwrap();
let expected = StoreEvent::new(vec![make_entity_change(USER)]);
check_events(subscription, vec![expected]).await
})
}
#[test]
fn throttle_subscription_throttles() {
run_test(|store, _, deployment| async move {
// Throttle for a very long time (30s)
let subscription = subscribe(&deployment.hash, USER)
.throttle_while_syncing(
&*LOGGER,
store
.clone()
.query_store(deployment.hash.clone().into(), true)
.await
.unwrap(),
Duration::from_secs(30),
)
.await;
let user4 = create_test_entity(
"4",
USER,
"Steve",
"[email protected]",
72 as i32,
120.7,
false,
None,
);
transact_entity_operations(
&store.subgraph_store(),
&deployment,
TEST_BLOCK_3_PTR.clone(),
vec![user4],
)
.unwrap();
// Make sure we time out waiting for the subscription
let res = subscription
.take(1)
.collect()
.compat()
.timeout(Duration::from_millis(500))
.await;
assert!(res.is_err());
})
}
#[test]
fn subgraph_schema_types_have_subgraph_id_directive() {
run_test(|store, _, deployment| async move {
let schema = store
.subgraph_store()
.api_schema(&deployment.hash)
.expect("test subgraph should have a schema");
for typedef in schema
.document()
.definitions
.iter()
.filter_map(|def| match def {
s::Definition::TypeDefinition(typedef) => Some(typedef),
_ => None,
})
{
// Verify that all types have a @subgraphId directive on them
let directive = match typedef {
s::TypeDefinition::Object(t) => &t.directives,
s::TypeDefinition::Interface(t) => &t.directives,
s::TypeDefinition::Enum(t) => &t.directives,
s::TypeDefinition::Scalar(t) => &t.directives,
s::TypeDefinition::Union(t) => &t.directives,
s::TypeDefinition::InputObject(t) => &t.directives,
}
.iter()
.find(|directive| directive.name == "subgraphId")
.expect("all subgraph schema types should have a @subgraphId directive");
// Verify that all @subgraphId directives match the subgraph
assert_eq!(
directive.arguments,
[(
String::from("id"),
s::Value::String(TEST_SUBGRAPH_ID_STRING.to_string())
)]
);
}
})
}
#[test]
fn handle_large_string_with_index() {
const NAME: &str = "name";
const ONE: &str = "large_string_one";
const TWO: &str = "large_string_two";
fn make_insert_op(id: &str, name: &str) -> EntityModification {
let mut data = Entity::new();
data.set("id", id);
data.set(NAME, name);
let key = EntityKey::data(TEST_SUBGRAPH_ID.clone(), USER.to_owned(), id.to_owned());
EntityModification::Insert { key, data }
}
run_test(|store, writable, deployment| async move {
// We have to produce a massive string (1_000_000 chars) because
// the repeated text compresses so well. This leads to an error
// 'index row requires 11488 bytes, maximum size is 8191' if
// used with a btree index without size limitation
let long_text = std::iter::repeat("Quo usque tandem")
.take(62500)
.collect::<String>();
let other_text = long_text.clone() + "X";
let metrics_registry = Arc::new(MockMetricsRegistry::new());
let stopwatch_metrics = StopwatchMetrics::new(
Logger::root(slog::Discard, o!()),
deployment.hash.clone(),
metrics_registry.clone(),
);
writable
.transact_block_operations(
TEST_BLOCK_3_PTR.clone(),
None,
vec![
make_insert_op(ONE, &long_text),
make_insert_op(TWO, &other_text),
],
stopwatch_metrics,
Vec::new(),
Vec::new(),
)
.expect("Failed to insert large text");
let query = user_query()
.first(5)
.filter(EntityFilter::Equal(
NAME.to_owned(),
long_text.clone().into(),
))
.asc(NAME);
let ids = store
.subgraph_store()
.find(query)
.expect("Could not find entity")
.iter()
.map(|e| e.id())
.collect::<Result<Vec<_>, _>>()
.expect("Found entities without an id");
assert_eq!(vec![ONE], ids);
// Make sure we check the full string and not just a prefix
let mut prefix = long_text.clone();
prefix.truncate(STRING_PREFIX_SIZE);
let query = user_query()
.first(5)
.filter(EntityFilter::LessOrEqual(NAME.to_owned(), prefix.into()))
.asc(NAME);
let ids = store
.subgraph_store()
.find(query)
.expect("Could not find entity")
.iter()
.map(|e| e.id())
.collect::<Result<Vec<_>, _>>()
.expect("Found entities without an id");
// Users with name 'Cindini' and 'Johnton'
assert_eq!(vec!["2", "1"], ids);
})
}
#[derive(Clone)]
struct WindowQuery(EntityQuery, Arc<DieselSubgraphStore>);
impl WindowQuery {
fn new(store: &Arc<DieselStore>) -> Self {
WindowQuery(
user_query()
.filter(EntityFilter::GreaterThan("age".into(), Value::from(0)))
.first(10),
store.subgraph_store(),
)
.default_window()
}
fn default_window(mut self) -> Self {
let entity_types = match self.0.collection {
EntityCollection::All(entity_types) => entity_types,
EntityCollection::Window(_) => {
unreachable!("we do not use this method with a windowed collection")
}
};
let windows = entity_types
.into_iter()
.map(|(child_type, column_names)| {
let attribute = WindowAttribute::Scalar("favorite_color".to_owned());
let link = EntityLink::Direct(attribute, ChildMultiplicity::Many);
let ids = vec!["red", "green", "yellow", "blue"]
.into_iter()
.map(String::from)
.collect();
EntityWindow {
child_type,
ids,
link,
column_names,
}
})
.collect();
self.0.collection = EntityCollection::Window(windows);
self
}
fn first(self, first: u32) -> Self {
WindowQuery(self.0.first(first), self.1)
}
fn skip(self, skip: u32) -> Self {
WindowQuery(self.0.skip(skip), self.1)
}
fn asc(self, attr: &str) -> Self {
WindowQuery(
self.0
.order(EntityOrder::Ascending(attr.to_owned(), ValueType::String)),
self.1,
)
}
fn desc(self, attr: &str) -> Self {
WindowQuery(
self.0
.order(EntityOrder::Descending(attr.to_owned(), ValueType::String)),
self.1,
)
}
fn unordered(self) -> Self {
WindowQuery(self.0.order(EntityOrder::Unordered), self.1)
}
fn above(self, age: i32) -> Self {
WindowQuery(
self.0
.filter(EntityFilter::GreaterThan("age".into(), Value::from(age))),
self.1,
)
}
fn against_color_and_age(self) -> Self {
let mut query = self.0;
query.collection = EntityCollection::All(vec![
(EntityType::from(USER), AttributeNames::All),
(EntityType::from("Person"), AttributeNames::All),
]);
WindowQuery(query, self.1).default_window()
}
fn expect(&self, mut expected_ids: Vec<&str>, qid: &str) {
let query = self.0.clone();
let store = &self.1;
let unordered = matches!(query.order, EntityOrder::Unordered);
let mut entity_ids = store
.find(query)
.expect("store.find failed to execute query")
.into_iter()
.map(|entity| match entity.get("id") {
Some(Value::String(id)) => id.to_owned(),
Some(_) => panic!("store.find returned entity with non-string ID attribute"),
None => panic!("store.find returned entity with no ID attribute"),
})
.collect::<Vec<_>>();
if unordered {
entity_ids.sort();
expected_ids.sort();
}
assert_eq!(expected_ids, entity_ids, "Failed query: {}", qid);
}
}
#[test]
fn window() {
fn make_color_end_age(entity_type: &str, id: &str, color: &str, age: i32) -> EntityOperation {
let mut entity = Entity::new();
entity.set("id", id.to_owned());
entity.set("age", age);
entity.set("favorite_color", color);
EntityOperation::Set {
key: EntityKey::data(
TEST_SUBGRAPH_ID.clone(),
entity_type.to_owned(),
id.to_owned(),
),
data: entity,
}
}
fn make_user(id: &str, color: &str, age: i32) -> EntityOperation {
make_color_end_age(USER, id, color, age)
}
fn make_person(id: &str, color: &str, age: i32) -> EntityOperation {
make_color_end_age("Person", id, color, age)
}
let ops = vec![
make_user("4", "green", 34),
make_user("5", "green", 17),
make_user("6", "green", 41),
make_user("7", "red", 25),
make_user("8", "red", 45),
make_user("9", "yellow", 37),
make_user("10", "blue", 27),
make_user("11", "blue", 19),
make_person("p1", "green", 12),
make_person("p2", "red", 15),
];
run_test(|store, _, deployment| async move {
transact_entity_operations(
&store.subgraph_store(),
&deployment,
TEST_BLOCK_3_PTR.clone(),
ops,
)
.expect("Failed to create test users");
// Get the first 2 entries in each 'color group'
WindowQuery::new(&store)
.first(2)
.expect(vec!["10", "11", "4", "5", "2", "7", "9"], "q1");
WindowQuery::new(&store)
.first(1)
.expect(vec!["10", "4", "2", "9"], "q2");
WindowQuery::new(&store)
.first(1)
.skip(1)
.expect(vec!["11", "5", "7"], "q3");
WindowQuery::new(&store)
.first(1)
.skip(1)
.desc("id")
.expect(vec!["10", "5", "7"], "q4");
WindowQuery::new(&store)
.first(1)
.skip(1)
.desc("favorite_color")
.expect(vec!["10", "5", "7"], "q5");
WindowQuery::new(&store)
.first(1)
.skip(1)
.desc("favorite_color")
.above(25)
.expect(vec!["4", "2"], "q6");
// Check queries for interfaces
WindowQuery::new(&store)
.first(1)
.skip(1)
.desc("favorite_color")
.above(12)
.against_color_and_age()
.expect(vec!["10", "5", "8"], "q7");
WindowQuery::new(&store)
.first(1)
.asc("age")
.above(12)
.against_color_and_age()
.expect(vec!["11", "5", "p2", "9"], "q8");
WindowQuery::new(&store)
.unordered()
.above(12)
.against_color_and_age()
.expect(
vec!["10", "11", "2", "4", "5", "6", "7", "8", "9", "p2"],
"q9",
);
});
}
#[test]
fn find_at_block() {
fn shaqueeena_at_block(block: BlockNumber, email: &'static str) {
run_test(move |store, _, _| async move {
let mut query = user_query()
.filter(EntityFilter::Equal("name".to_owned(), "Shaqueeena".into()))
.desc("name");
query.block = block;
let entities = store
.subgraph_store()
.find(query)
.expect("store.find failed to execute query");
assert_eq!(1, entities.len());
let entity = entities.first().unwrap();
assert_eq!(Some(&Value::from(email)), entity.get("email"));
})
}
shaqueeena_at_block(1, "[email protected]");
shaqueeena_at_block(2, "[email protected]");
shaqueeena_at_block(7000, "[email protected]");
}
#[test]
fn cleanup_cached_blocks() {
if store_is_sharded() {
// When the store is sharded, the setup in main.rs makes sure we
// don't ever try to clean up cached blocks
println!("store is sharded, skipping test");
return;
}
run_test(|store, _, _| async move {
use block_store::*;
// The test subgraph is at block 2. Since we don't ever delete
// the genesis block, the only block eligible for cleanup is BLOCK_ONE
// and the first retained block is block 2.
block_store::set_chain(
vec![&*GENESIS_BLOCK, &*BLOCK_ONE, &*BLOCK_TWO, &*BLOCK_THREE],
NETWORK_NAME,
);
let chain_store = store
.block_store()
.chain_store(NETWORK_NAME)
.expect("fake chain store");
let cleaned = chain_store
.cleanup_cached_blocks(10)
.expect("cleanup succeeds");
assert_eq!(Some((2, 1)), cleaned);
})
}
#[test]
fn reorg_tracking() {
fn update_john(
store: &Arc<DieselSubgraphStore>,
deployment: &DeploymentLocator,
age: i32,
block: &BlockPtr,
) {
let test_entity_1 = create_test_entity(
"1",
USER,
"Johnton",
"[email protected]",
age,
184.4,
false,
None,
);
transact_entity_operations(store, deployment, block.clone(), vec![test_entity_1]).unwrap();
}
macro_rules! check_state {
($store:expr,
$reorg_count: expr,
$max_reorg_depth:expr,
$latest_ethereum_block_number:expr) => {
let subgraph_id = TEST_SUBGRAPH_ID.to_owned();
let state = deployment_state($store.as_ref(), &subgraph_id).await;
assert_eq!(&subgraph_id, &state.id, "subgraph_id");
assert_eq!($reorg_count, state.reorg_count, "reorg_count");
assert_eq!($max_reorg_depth, state.max_reorg_depth, "max_reorg_depth");
assert_eq!(
$latest_ethereum_block_number, state.latest_ethereum_block_number,
"latest_ethereum_block_number"
);
};
}
// Check that reorg_count, max_reorg_depth, and latest_ethereum_block_number
// are reported correctly in DeploymentState
run_test(|store, _, deployment| async move {
let subgraph_store = store.subgraph_store();
check_state!(store, 0, 0, 2);
// Jump to block 4
transact_entity_operations(
&subgraph_store,
&deployment,
TEST_BLOCK_4_PTR.clone(),
vec![],
)
.unwrap();
check_state!(store, 0, 0, 4);
// Back to block 3
revert_block(&store, &deployment, &*TEST_BLOCK_3_PTR).await;
check_state!(store, 1, 1, 3);
// Back to block 2
revert_block(&store, &deployment, &*TEST_BLOCK_2_PTR).await;
check_state!(store, 2, 2, 2);
// Forward to block 3
update_john(&subgraph_store, &deployment, 70, &TEST_BLOCK_3_PTR);
check_state!(store, 2, 2, 3);
// Forward to block 4
update_john(&subgraph_store, &deployment, 71, &TEST_BLOCK_4_PTR);
check_state!(store, 2, 2, 4);
// Forward to block 5
update_john(&subgraph_store, &deployment, 72, &TEST_BLOCK_5_PTR);
check_state!(store, 2, 2, 5);
// Revert all the way back to block 2
revert_block(&store, &deployment, &*TEST_BLOCK_4_PTR).await;
check_state!(store, 3, 2, 4);
revert_block(&store, &deployment, &*TEST_BLOCK_3_PTR).await;
check_state!(store, 4, 2, 3);
revert_block(&store, &deployment, &*TEST_BLOCK_2_PTR).await;
check_state!(store, 5, 3, 2);
})
}
// Builds an EntityOperation::Set for a test user entity. The parameter names
// below are assumptions reconstructed from this helper's call sites above.
fn create_test_entity(
    id: &str,
    entity_type: &str,
    name: &str,
    email: &str,
    age: i32,
    weight: f64,
    coffee: bool,
    favorite_color: Option<&str>,
) -> EntityOperation
{
let mut test_entity = Entity::new();
test_entity.insert("id".to_owned(), Value::String(id.to_owned()));
test_entity.insert("name".to_owned(), Value::String(name.to_owned()));
let bin_name = scalar::Bytes::from_str(&hex::encode(name)).unwrap();
test_entity.insert("bin_name".to_owned(), Value::Bytes(bin_name));
test_entity.insert("email".to_owned(), Value::String(email.to_owned()));
test_entity.insert("age".to_owned(), Value::Int(age));
test_entity.insert(
"seconds_age".to_owned(),
Value::BigInt(BigInt::from(age) * 31557600.into()),
);
test_entity.insert("weight".to_owned(), Value::BigDecimal(weight.into()));
test_entity.insert("coffee".to_owned(), Value::Bool(coffee));
test_entity.insert(
"favorite_color".to_owned(),
favorite_color
.map(|s| Value::String(s.to_owned()))
.unwrap_or(Value::Null),
);
EntityOperation::Set {
key: EntityKey::data(
TEST_SUBGRAPH_ID.clone(),
entity_type.to_owned(),
id.to_owned(),
),
data: test_entity,
}
}
karma.conf.js
module.exports = function(config) {
config.set({
files: [
'node_modules/jquery/dist/jquery.js',
'dist/jquery.boilerplate.min.js',
'test/setup.js',
'test/spec/*'
		],
		frameworks: ['qunit'],
		autoWatch: true
	});
};
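// Usage note (a sketch, assuming karma and karma-qunit are installed locally):
//   npx karma start karma.conf.js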
singleton.py
#!/usr/bin/env python
"""
SINGLETON
Use the Singleton pattern when:
1. there must be exactly one instance of a class, and it must be
accessible to clients from a well-known access point.
2. the sole instance should be extensible by subclassing, and clients
should be able to use an extended instance without modifying their code.
"""
import logging
class Connection(object):
"""
Singleton
1. Defines an Instance operation that lets clients access its unique
instance.
    2. May be responsible for creating its own unique instance.
"""
    def __new__(cls):
        if '_connection' not in cls.__dict__:
            cls._connection = object.__new__(cls)
            logging.basicConfig(level=logging.INFO)
            logging.info('New database connection created!')
            logging.info('Connection established.')
        return cls._connection
if __name__ == "__main__":
c = Connection()
d = Connection()
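    # Both names refer to the same instance; construction only happens once.
    assert c is d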
crd.go
package v1
import (
"fmt"
"reflect"
"time"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
)
// CreateCustomResourceDefinition creates the CRD and adds it to Kubernetes.
// If an error occurs, it cleans up after itself.
func CreateCustomResourceDefinition(namespace string, clientSet apiextensionsclientset.Interface) (*apiextensionsv1beta1.CustomResourceDefinition, error) {
crd := &apiextensionsv1beta1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: CRDName,
Namespace: namespace,
},
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
Group: GroupName,
Version: SchemeGroupVersion.Version,
Scope: apiextensionsv1beta1.NamespaceScoped,
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: Plural,
Kind: reflect.TypeOf(SnapshotGroup{}).Name(),
},
},
}
_, err := clientSet.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd)
if err == nil {
fmt.Println("CRD SnapshotGroup is created")
} else if apierrors.IsAlreadyExists(err) {
fmt.Println("CRD SnapshotGroup already exists")
} else {
fmt.Printf("Fail to create CRD SnapshotGroup: %+v\n", err)
return nil, err
}
// Wait for CRD creation.
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
crd, err = clientSet.ApiextensionsV1beta1().CustomResourceDefinitions().Get(CRDName, metav1.GetOptions{})
if err != nil {
fmt.Printf("Fail to wait for CRD SnapshotGroup creation: %+v\n", err)
return false, err
}
for _, cond := range crd.Status.Conditions {
switch cond.Type {
case apiextensionsv1beta1.Established:
if cond.Status == apiextensionsv1beta1.ConditionTrue {
return true, err
}
case apiextensionsv1beta1.NamesAccepted:
			if cond.Status == apiextensionsv1beta1.ConditionFalse {
				fmt.Printf("Name conflict while waiting for CRD SnapshotGroup creation: %s, %+v\n", cond.Reason, err)
			}
}
}
return false, err
})
// If there is an error, delete the object to keep it clean.
if err != nil {
fmt.Println("Try to cleanup")
deleteErr := clientSet.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(CRDName, nil)
if deleteErr != nil {
fmt.Printf("Fail to delete CRD SnapshotGroup: %+v\n", deleteErr)
return nil, errors.NewAggregate([]error{err, deleteErr})
}
return nil, err
}
return crd, nil
}
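// Usage sketch (assumes a *rest.Config named cfg is already available, e.g.
// from clientcmd or rest.InClusterConfig; NewForConfig is the standard
// clientset constructor):
//
//	clientSet, err := apiextensionsclientset.NewForConfig(cfg)
//	if err != nil {
//		panic(err)
//	}
//	crd, err := CreateCustomResourceDefinition("default", clientSet)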
simulated_propagation.py
#!/usr/bin/python3
__author__ = "Mark H. Meng"
__copyright__ = "Copyright 2021, National University of S'pore and A*STAR"
__credits__ = ["G. Bai", "H. Guo", "S. G. Teo", "J. S. Dong"]
__license__ = "MIT"
import paoding.utility.interval_arithmetic as ia
import paoding.utility.utils as utils
import math
def calculate_bounds_of_output(model, intervals, loc):
# Load the parameters and configuration of the input model
(w, g) = utils.load_param_and_config(model)
num_layers = len(model.layers)
    # Just return these intervals if the current location is at the second-to-last layer
if loc == num_layers - 1:
return intervals
total_pruned_count = 0
propagated_next_layer_interval = None
while loc < num_layers - 1:
# Exclude non FC layers
num_curr_neurons = len(w[loc + 1][0])
num_next_neurons = len(w[loc + 1][0][0])
relu_activation = g[loc]['activation'] == 'relu'
if len(intervals) != num_curr_neurons:
raise Exception("Error: input intervals are not in expected shape -",
num_curr_neurons, "expected, not", len(intervals))
# No activation at the output layer
if loc + 1 == num_layers - 1:
propagated_next_layer_interval = ia.forward_propogation(intervals,
w[loc + 1][0],
w[loc + 1][1],
activation=False)
else:
propagated_next_layer_interval = ia.forward_propogation(intervals,
w[loc + 1][0],
w[loc + 1][1],
activation=True,
relu_activation=relu_activation)
intervals = propagated_next_layer_interval
loc += 1
return propagated_next_layer_interval
# Return the evaluation of the impact in a pair of real numbers as interval
def calculate_impact_of_pruning_next_layer(model, big_map, pruning_pairs, loc, cumulative_next_layer_intervals=None,
kaggle_credit=False):
# Load the parameters and configuration of the input model
(w, g) = utils.load_param_and_config(model)
    # Each pruning pair is in the form of a tuple (a, b), in which "a" is the hidden unit to be pruned, and "b"
# is the one to remain. The Delta produced by this pruning is as follow:
# Delta = [b * (w_a + w_b) + 2 * bias_b] - [a * w_a + bias_a + b * w_b + bias_b]
# = (b-a) * w_a + (bias_b - bias_a)
# or if we omit the impact of bias:
# Delta = [b * (w_a + w_b)] - [a * w_a + b * w_b]
# = (b-a) * w_a
    # The Delta produced by each pruning surfaces at the next layer, and the
    # propagation simulates the impact of Delta up to the output layer.
    # In the case of a single-unit pruning, s.t. b = -1,
    # the Delta will be -1 * (a * w_a)
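    # Worked example (made-up numbers): if a is defined on [0.2, 0.6] and b on
    # [0.1, 0.5], interval subtraction gives a - b in [0.2 - 0.5, 0.6 - 0.1]
    # = [-0.3, 0.5]; scaling by a weight w_a = 2.0 then yields an impact
    # interval of [-0.6, 1.0] on the corresponding next-layer unit.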
next_layer_size = len(w[loc+1][0][0])
if cumulative_next_layer_intervals is None:
empty_interval = (0,0)
cumulative_next_layer_intervals = [empty_interval for i in range(0, next_layer_size)]
num_layers = len(model.layers)
for (a, b) in pruning_pairs:
(a_lo, a_hi) = big_map[loc][a]
# DEPRECATED
# (a_lo, a_hi) = get_definition_interval(a, loc, parameters=w, relu_activation=use_relu, kaggle_credit=kaggle_credit)
# Check if there is a pair pruning or single unit pruning (b=-1)
if b != -1:
(b_lo, b_hi) = big_map[loc][b]
# DEPRECATED
# (b_lo, b_hi) = get_definition_interval(b, loc, parameters=w, relu_activation=use_relu, kaggle_credit=kaggle_credit)
# approximate the result of (a-b)
(a_minus_b_lo, a_minus_b_hi) = ia.interval_minus((a_lo, a_hi), (b_lo, b_hi))
w_a = w[loc + 1][0][a]
            if len(w_a) != next_layer_size:
raise Exception("Inconsistent size of parameters")
impact_to_next_layer = [ia.interval_scale((a_minus_b_lo, a_minus_b_hi), k) for k in w_a]
else:
w_a = w[loc + 1][0][a]
            if len(w_a) != next_layer_size:
raise Exception("Inconsistent size of parameters")
impact_to_next_layer = [ia.interval_scale((a_lo, a_hi), -1*k) for k in w_a]
        if len(impact_to_next_layer) != next_layer_size:
raise Exception("Inconsistent size of parameters")
for index, interval in enumerate(cumulative_next_layer_intervals):
cumulative_next_layer_intervals[index] = ia.interval_add(interval, impact_to_next_layer[index])
#print(cumulative_next_layer_intervals)
return cumulative_next_layer_intervals
def get_definition_map(model, definition_dict=None, input_interval=(0, 1)):
# First locate the dense (FC) layers, starting from the input layer/flatten layer until the second last layer
## Load the parameters and configuration of the input model
(w, g) = utils.load_param_and_config(model)
num_layers = len(model.layers)
layer_idx = 0
starting_layer_index = -1
ending_layer_index = -1
while layer_idx < num_layers - 1:
if "dense" in model.layers[layer_idx].name:
if starting_layer_index < 0:
starting_layer_index = layer_idx - 1
if ending_layer_index < layer_idx:
ending_layer_index = layer_idx
layer_idx += 1
if (starting_layer_index < 0) or (ending_layer_index < 0):
raise Exception("Fully connected layers not identified")
# Now let's create a hash table as dictionary to store all definition intervals of FC neurons
if definition_dict is None:
definition_dict = {}
definition_dict[starting_layer_index] = {}
for i in range(0, len(w[starting_layer_index + 1][0])):
definition_dict[starting_layer_index][i] = input_interval
for i in range(starting_layer_index + 1, ending_layer_index + 1):
num_prev_neurons = len(w[i][0])
num_curr_neurons = len(w[i][0][0])
if i not in definition_dict.keys():
definition_dict[i] = {}
curr_activation = g[i]['activation']
for m in range(0, num_curr_neurons):
(sum_lo, sum_hi) = (0, 0)
for n in range(0, num_prev_neurons):
affine_w_x = ia.interval_scale(definition_dict[i-1][n], w[i][0][n][m])
(sum_lo, sum_hi) = ia.interval_add((sum_lo, sum_hi), affine_w_x)
bias = (w[i][1][m], w[i][1][m])
(sum_lo, sum_hi) = ia.interval_add((sum_lo, sum_hi), bias)
if curr_activation == 'relu':
definition_dict[i][m] = (0, sum_hi)
else: # Assume it is sigmoid
sum_hi = 1 / (1 + math.exp(-1 * sum_hi))
sum_lo = 1 / (1 + math.exp(-1 * sum_lo))
definition_dict[i][m] = (sum_lo, sum_hi)
return definition_dict
# DEPRECATED - Replaced by initialize_definition_map
def get_definition_interval(unit_index, layer_index, parameters, relu_activation=True, kaggle_credit=False):
if kaggle_credit:
input_definition_interval = (-5, 5)
else:
input_definition_interval = (0, 1)
# input_size = len(parameters[1][0])
# Starting from input layer (MLP) or the last flatten layer (CNN)
if layer_index == 1 or (layer_index>1 and not parameters[layer_index-1]):
#print(">> DEBUG: unit_index:", unit_index, " & layer_index:", layer_index)
weights = [parameters[layer_index][0][j][unit_index] for j in range(0, len(parameters[layer_index][0]))]
bias = parameters[layer_index][1][unit_index]
(sum_lo, sum_hi) = ia.interval_sum([ia.interval_scale(input_definition_interval, w) for w in weights])
(sum_lo, sum_hi) = ia.interval_add((sum_lo, sum_hi), (bias, bias))
if relu_activation:
if sum_hi < 0:
sum_hi = 0
if sum_lo < 0:
sum_lo = 0
else:
sum_hi = 1 / (1 + math.exp(-1 * sum_hi))
sum_lo = 1 / (1 + math.exp(-1 * sum_lo))
return (sum_lo, sum_hi)
    # Temp workaround: no definition algorithm available for nodes after the 2nd layer; set as [-1, 1]
else:
weights = [parameters[layer_index][0][j][unit_index] for j in range(0, len(parameters[layer_index][0]))]
bias = parameters[layer_index][1][unit_index]
(sum_lo, sum_hi) = ia.interval_sum([ia.interval_scale(input_definition_interval, w) for w in weights])
(sum_lo, sum_hi) = ia.interval_add((sum_lo, sum_hi), (bias, bias))
if relu_activation:
if sum_hi < 0:
sum_hi = 0
if sum_lo < 0:
sum_lo = 0
else:
sum_hi = 1 / (1 + math.exp(-1 * sum_hi))
sum_lo = 1 / (1 + math.exp(-1 * sum_lo))
return (sum_lo, sum_hi)
return None
version.go
package gocv
/*
#include <stdlib.h>
#include "version.h"
*/
import "C"
// GoCVVersion of this package, for display purposes.
const GoCVVersion = "0.11.0"
// Version returns the current golang package version
func Version() string {
return GoCVVersion
}
// OpenCVVersion returns the current OpenCV lib version
func OpenCVVersion() string {
return C.GoString(C.openCVVersion())
}
bonding.go
package netconf
import (
"io/ioutil"
"os"
"os/exec"
"strings"
"time"
"github.com/burmilla/os/pkg/log"
"github.com/vishvananda/netlink"
)
const (
base = "/sys/class/net/"
bondingMasters = "/sys/class/net/bonding_masters"
)
type Bonding struct {
name string
}
func (b *Bonding) init() error {
_, err := os.Stat(bondingMasters)
if os.IsNotExist(err) {
log.Info("Loading bonding kernel module")
cmd := exec.Command("modprobe", "bonding")
cmd.Stderr = os.Stderr
		cmd.Stdout = os.Stdout
err = cmd.Run()
if err != nil {
for i := 0; i < 30; i++ {
if _, err = os.Stat(bondingMasters); err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
}
}
_, err = os.Stat(bondingMasters)
return err
}
func contains(file, word string) (bool, error) {
words, err := ioutil.ReadFile(file)
if err != nil {
return false, err
}
for _, s := range strings.Split(strings.TrimSpace(string(words)), " ") {
if s == strings.TrimSpace(word) {
return true, nil
}
}
return false, nil
}
func (b *Bonding) linkDown() error {
link, err := netlink.LinkByName(b.name)
if err != nil {
return err
}
return netlink.LinkSetDown(link)
}
func (b *Bonding) ListSlaves() ([]string, error) {
file := base + b.name + "/bonding/slaves"
words, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
result := []string{}
for _, s := range strings.Split(strings.TrimSpace(string(words)), " ") {
if s != "" {
result = append(result, s)
}
}
return result, nil
}
func (b *Bonding) RemoveSlave(slave string) error {
if ok, err := contains(base+b.name+"/bonding/slaves", slave); err != nil {
return err
} else if !ok {
return nil
}
p := base + b.name + "/bonding/slaves"
log.Infof("Removing slave %s from master %s", slave, b.name)
return ioutil.WriteFile(p, []byte("-"+slave), 0644)
}
func (b *Bonding) AddSlave(slave string) error {
if ok, err := contains(base+b.name+"/bonding/slaves", slave); err != nil {
return err
} else if ok {
return nil
}
p := base + b.name + "/bonding/slaves"
log.Infof("Adding slave %s to master %s", slave, b.name)
return ioutil.WriteFile(p, []byte("+"+slave), 0644)
}
func (b *Bonding) Opt(key, value string) error {
if key == "mode" {
// Don't care about errors here
b.linkDown()
slaves, _ := b.ListSlaves()
for _, slave := range slaves {
b.RemoveSlave(slave)
}
}
p := base + b.name + "/bonding/" + key
if err := ioutil.WriteFile(p, []byte(value), 0644); err != nil {
log.Errorf("Failed to set %s=%s on %s: %v", key, value, b.name, err)
return err
}
log.Infof("Set %s=%s on %s", key, value, b.name)
return nil
}
func Bond(name string) (*Bonding, error) {
b := &Bonding{name: name}
if err := b.init(); err != nil {
return nil, err
}
if ok, err := contains(bondingMasters, name); err != nil {
return nil, err
} else if ok {
return b, nil
}
log.Infof("Creating bond %s", name)
return b, ioutil.WriteFile(bondingMasters, []byte("+"+name), 0644)
}
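// Usage sketch (assumes the bonding kernel module is loadable and the caller
// can write under /sys/class/net):
//
//	b, err := Bond("bond0")
//	if err != nil {
//		log.Errorf("bond: %v", err)
//		return
//	}
//	_ = b.Opt("mode", "802.3ad")
//	_ = b.AddSlave("eth0")
//	_ = b.AddSlave("eth1")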
view.py
from rest_framework import mixins, generics
from rest_framework.response import Response
from service_user.exports import get_user_instance_with_token_id
from service_application.exports import get_app_instance_with_app_id
from service_application.models import UsingApplicationModel
from utils import log, ParameterKeys
class UninstallAPI(generics.GenericAPIView, mixins.DestroyModelMixin):
def delete(self, request):
log(self.delete, params=request.query_params)
# get user instance
_ui = get_user_instance_with_token_id(
user_id=request.query_params.get(ParameterKeys.USER_ID, None),
token_id=request.query_params.get(ParameterKeys.TOKEN_ID, None)
)
if _ui is None:
return Response({
                ParameterKeys.STATUS: ParameterKeys.INVALID,
                ParameterKeys.CODE: ParameterKeys.INVALID_USER
})
# get application instance
_ai = get_app_instance_with_app_id(
app_id=request.query_params.get(ParameterKeys.APPLICATION_ID)
)
if _ai is None:
return Response({
ParameterKeys.STATUS: ParameterKeys.INVALID,
ParameterKeys.CODE: ParameterKeys.INVALID_APPLICATION
})
# response
try:
_i = UsingApplicationModel.objects.get(
application_id=_ai.id,
user_id=_ui.id
)
except UsingApplicationModel.DoesNotExist:
return Response({
ParameterKeys.STATUS: ParameterKeys.INVALID,
ParameterKeys.CODE: ParameterKeys.INVALID_APPLICATION
})
self.perform_destroy(_i)
return Response({
ParameterKeys.STATUS: ParameterKeys.SUCCESS,
ParameterKeys.APPLICATION_ID: str(_ai.app_id)
})
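# Usage sketch (hypothetical routing): this view would back an endpoint like
#   DELETE /uninstall?user_id=<id>&token_id=<token>&application_id=<app_id>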
namespace_mock.go
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/dbnode/storage/namespace/types.go
// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package namespace is a generated GoMock package.
package namespace
import (
"reflect"
"time"
"github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3x/ident"
"github.com/m3db/m3x/instrument"
"github.com/golang/mock/gomock"
)
// MockOptions is a mock of Options interface
type MockOptions struct {
ctrl *gomock.Controller
recorder *MockOptionsMockRecorder
}
// MockOptionsMockRecorder is the mock recorder for MockOptions
type MockOptionsMockRecorder struct {
mock *MockOptions
}
// NewMockOptions creates a new mock instance
func NewMockOptions(ctrl *gomock.Controller) *MockOptions {
mock := &MockOptions{ctrl: ctrl}
mock.recorder = &MockOptionsMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockOptions) EXPECT() *MockOptionsMockRecorder {
return m.recorder
}
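// Usage sketch (in a test; gomock.NewController and the generated EXPECT
// recorder are the standard pattern):
//
//	ctrl := gomock.NewController(t)
//	defer ctrl.Finish()
//	opts := NewMockOptions(ctrl)
//	opts.EXPECT().Validate().Return(nil)
//	if err := opts.Validate(); err != nil {
//		t.Fatal(err)
//	}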
// Validate mocks base method
func (m *MockOptions) Validate() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Validate")
ret0, _ := ret[0].(error)
return ret0
}
// Validate indicates an expected call of Validate
func (mr *MockOptionsMockRecorder) Validate() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockOptions)(nil).Validate))
}
// Equal mocks base method
func (m *MockOptions) Equal(value Options) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Equal", value)
ret0, _ := ret[0].(bool)
return ret0
}
// Equal indicates an expected call of Equal
func (mr *MockOptionsMockRecorder) Equal(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Equal", reflect.TypeOf((*MockOptions)(nil).Equal), value)
}
// SetBootstrapEnabled mocks base method
func (m *MockOptions) SetBootstrapEnabled(value bool) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetBootstrapEnabled", value)
ret0, _ := ret[0].(Options)
return ret0
}
// SetBootstrapEnabled indicates an expected call of SetBootstrapEnabled
func (mr *MockOptionsMockRecorder) SetBootstrapEnabled(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBootstrapEnabled", reflect.TypeOf((*MockOptions)(nil).SetBootstrapEnabled), value)
}
// BootstrapEnabled mocks base method
func (m *MockOptions) BootstrapEnabled() bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BootstrapEnabled")
ret0, _ := ret[0].(bool)
return ret0
}
// BootstrapEnabled indicates an expected call of BootstrapEnabled
func (mr *MockOptionsMockRecorder) BootstrapEnabled() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrapEnabled", reflect.TypeOf((*MockOptions)(nil).BootstrapEnabled))
}
// SetFlushEnabled mocks base method
func (m *MockOptions) SetFlushEnabled(value bool) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetFlushEnabled", value)
ret0, _ := ret[0].(Options)
return ret0
}
// SetFlushEnabled indicates an expected call of SetFlushEnabled
func (mr *MockOptionsMockRecorder) SetFlushEnabled(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFlushEnabled", reflect.TypeOf((*MockOptions)(nil).SetFlushEnabled), value)
}
// FlushEnabled mocks base method
func (m *MockOptions) FlushEnabled() bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FlushEnabled")
ret0, _ := ret[0].(bool)
return ret0
}
// FlushEnabled indicates an expected call of FlushEnabled
func (mr *MockOptionsMockRecorder) FlushEnabled() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlushEnabled", reflect.TypeOf((*MockOptions)(nil).FlushEnabled))
}
// SetSnapshotEnabled mocks base method
func (m *MockOptions) SetSnapshotEnabled(value bool) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetSnapshotEnabled", value)
ret0, _ := ret[0].(Options)
return ret0
}
// SetSnapshotEnabled indicates an expected call of SetSnapshotEnabled
func (mr *MockOptionsMockRecorder) SetSnapshotEnabled(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSnapshotEnabled", reflect.TypeOf((*MockOptions)(nil).SetSnapshotEnabled), value)
}
// SnapshotEnabled mocks base method
func (m *MockOptions) SnapshotEnabled() bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SnapshotEnabled")
ret0, _ := ret[0].(bool)
return ret0
}
// SnapshotEnabled indicates an expected call of SnapshotEnabled
func (mr *MockOptionsMockRecorder) SnapshotEnabled() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SnapshotEnabled", reflect.TypeOf((*MockOptions)(nil).SnapshotEnabled))
}
// SetWritesToCommitLog mocks base method
func (m *MockOptions) SetWritesToCommitLog(value bool) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetWritesToCommitLog", value)
ret0, _ := ret[0].(Options)
return ret0
}
// SetWritesToCommitLog indicates an expected call of SetWritesToCommitLog
func (mr *MockOptionsMockRecorder) SetWritesToCommitLog(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWritesToCommitLog", reflect.TypeOf((*MockOptions)(nil).SetWritesToCommitLog), value)
}
// WritesToCommitLog mocks base method
func (m *MockOptions) WritesToCommitLog() bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WritesToCommitLog")
ret0, _ := ret[0].(bool)
return ret0
}
// WritesToCommitLog indicates an expected call of WritesToCommitLog
func (mr *MockOptionsMockRecorder) WritesToCommitLog() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritesToCommitLog", reflect.TypeOf((*MockOptions)(nil).WritesToCommitLog))
}
// SetCleanupEnabled mocks base method
func (m *MockOptions) SetCleanupEnabled(value bool) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetCleanupEnabled", value)
ret0, _ := ret[0].(Options)
return ret0
}
// SetCleanupEnabled indicates an expected call of SetCleanupEnabled
func (mr *MockOptionsMockRecorder) SetCleanupEnabled(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCleanupEnabled", reflect.TypeOf((*MockOptions)(nil).SetCleanupEnabled), value)
}
// CleanupEnabled mocks base method
func (m *MockOptions) CleanupEnabled() bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CleanupEnabled")
ret0, _ := ret[0].(bool)
return ret0
}
// CleanupEnabled indicates an expected call of CleanupEnabled
func (mr *MockOptionsMockRecorder) CleanupEnabled() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupEnabled", reflect.TypeOf((*MockOptions)(nil).CleanupEnabled))
}
// SetRepairEnabled mocks base method
func (m *MockOptions) SetRepairEnabled(value bool) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetRepairEnabled", value)
ret0, _ := ret[0].(Options)
return ret0
}
// SetRepairEnabled indicates an expected call of SetRepairEnabled
func (mr *MockOptionsMockRecorder) SetRepairEnabled(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRepairEnabled", reflect.TypeOf((*MockOptions)(nil).SetRepairEnabled), value)
}
// RepairEnabled mocks base method
func (m *MockOptions) RepairEnabled() bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RepairEnabled")
ret0, _ := ret[0].(bool)
return ret0
}
// RepairEnabled indicates an expected call of RepairEnabled
func (mr *MockOptionsMockRecorder) RepairEnabled() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RepairEnabled", reflect.TypeOf((*MockOptions)(nil).RepairEnabled))
}
// SetRetentionOptions mocks base method
func (m *MockOptions) SetRetentionOptions(value retention.Options) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetRetentionOptions", value)
ret0, _ := ret[0].(Options)
return ret0
}
// SetRetentionOptions indicates an expected call of SetRetentionOptions
func (mr *MockOptionsMockRecorder) SetRetentionOptions(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRetentionOptions", reflect.TypeOf((*MockOptions)(nil).SetRetentionOptions), value)
}
// RetentionOptions mocks base method
func (m *MockOptions) RetentionOptions() retention.Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RetentionOptions")
ret0, _ := ret[0].(retention.Options)
return ret0
}
// RetentionOptions indicates an expected call of RetentionOptions
func (mr *MockOptionsMockRecorder) RetentionOptions() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RetentionOptions", reflect.TypeOf((*MockOptions)(nil).RetentionOptions))
}
// SetIndexOptions mocks base method
func (m *MockOptions) SetIndexOptions(value IndexOptions) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetIndexOptions", value)
ret0, _ := ret[0].(Options)
return ret0
}
// SetIndexOptions indicates an expected call of SetIndexOptions
func (mr *MockOptionsMockRecorder) SetIndexOptions(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIndexOptions", reflect.TypeOf((*MockOptions)(nil).SetIndexOptions), value)
}
// IndexOptions mocks base method
func (m *MockOptions) IndexOptions() IndexOptions {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IndexOptions")
ret0, _ := ret[0].(IndexOptions)
return ret0
}
// IndexOptions indicates an expected call of IndexOptions
func (mr *MockOptionsMockRecorder) IndexOptions() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndexOptions", reflect.TypeOf((*MockOptions)(nil).IndexOptions))
}
// MockIndexOptions is a mock of IndexOptions interface
type MockIndexOptions struct {
ctrl *gomock.Controller
recorder *MockIndexOptionsMockRecorder
}
// MockIndexOptionsMockRecorder is the mock recorder for MockIndexOptions
type MockIndexOptionsMockRecorder struct {
mock *MockIndexOptions
}
// NewMockIndexOptions creates a new mock instance
func NewMockIndexOptions(ctrl *gomock.Controller) *MockIndexOptions {
mock := &MockIndexOptions{ctrl: ctrl}
mock.recorder = &MockIndexOptionsMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockIndexOptions) EXPECT() *MockIndexOptionsMockRecorder {
return m.recorder
}
// Equal mocks base method
func (m *MockIndexOptions) Equal(value IndexOptions) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Equal", value)
ret0, _ := ret[0].(bool)
return ret0
}
// Equal indicates an expected call of Equal
func (mr *MockIndexOptionsMockRecorder) Equal(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Equal", reflect.TypeOf((*MockIndexOptions)(nil).Equal), value)
}
// SetEnabled mocks base method
func (m *MockIndexOptions) SetEnabled(value bool) IndexOptions {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetEnabled", value)
ret0, _ := ret[0].(IndexOptions)
return ret0
}
// SetEnabled indicates an expected call of SetEnabled
func (mr *MockIndexOptionsMockRecorder) SetEnabled(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEnabled", reflect.TypeOf((*MockIndexOptions)(nil).SetEnabled), value)
}
// Enabled mocks base method
func (m *MockIndexOptions) Enabled() bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Enabled")
ret0, _ := ret[0].(bool)
return ret0
}
// Enabled indicates an expected call of Enabled
func (mr *MockIndexOptionsMockRecorder) Enabled() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockIndexOptions)(nil).Enabled))
}
// SetBlockSize mocks base method
func (m *MockIndexOptions) SetBlockSize(value time.Duration) IndexOptions {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetBlockSize", value)
ret0, _ := ret[0].(IndexOptions)
return ret0
}
// SetBlockSize indicates an expected call of SetBlockSize
func (mr *MockIndexOptionsMockRecorder) SetBlockSize(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBlockSize", reflect.TypeOf((*MockIndexOptions)(nil).SetBlockSize), value)
}
// BlockSize mocks base method
func (m *MockIndexOptions) BlockSize() time.Duration {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BlockSize")
ret0, _ := ret[0].(time.Duration)
return ret0
}
// BlockSize indicates an expected call of BlockSize
func (mr *MockIndexOptionsMockRecorder) BlockSize() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockSize", reflect.TypeOf((*MockIndexOptions)(nil).BlockSize))
}
// MockMetadata is a mock of Metadata interface
type MockMetadata struct {
ctrl *gomock.Controller
recorder *MockMetadataMockRecorder
}
// MockMetadataMockRecorder is the mock recorder for MockMetadata
type MockMetadataMockRecorder struct {
mock *MockMetadata
}
// NewMockMetadata creates a new mock instance
func NewMockMetadata(ctrl *gomock.Controller) *MockMetadata {
mock := &MockMetadata{ctrl: ctrl}
mock.recorder = &MockMetadataMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockMetadata) EXPECT() *MockMetadataMockRecorder {
return m.recorder
}
// Equal mocks base method
func (m *MockMetadata) Equal(value Metadata) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Equal", value)
ret0, _ := ret[0].(bool)
return ret0
}
// Equal indicates an expected call of Equal
func (mr *MockMetadataMockRecorder) Equal(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Equal", reflect.TypeOf((*MockMetadata)(nil).Equal), value)
}
// ID mocks base method
func (m *MockMetadata) ID() ident.ID {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ID")
ret0, _ := ret[0].(ident.ID)
return ret0
}
// ID indicates an expected call of ID
func (mr *MockMetadataMockRecorder) ID() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockMetadata)(nil).ID))
}
// Options mocks base method
func (m *MockMetadata) Options() Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Options")
ret0, _ := ret[0].(Options)
return ret0
}
// Options indicates an expected call of Options
func (mr *MockMetadataMockRecorder) Options() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Options", reflect.TypeOf((*MockMetadata)(nil).Options))
}
// MockMap is a mock of Map interface
type MockMap struct {
ctrl *gomock.Controller
recorder *MockMapMockRecorder
}
// MockMapMockRecorder is the mock recorder for MockMap
type MockMapMockRecorder struct {
mock *MockMap
}
// NewMockMap creates a new mock instance
func NewMockMap(ctrl *gomock.Controller) *MockMap {
mock := &MockMap{ctrl: ctrl}
mock.recorder = &MockMapMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockMap) EXPECT() *MockMapMockRecorder {
return m.recorder
}
// Equal mocks base method
func (m *MockMap) Equal(value Map) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Equal", value)
ret0, _ := ret[0].(bool)
return ret0
}
// Equal indicates an expected call of Equal
func (mr *MockMapMockRecorder) Equal(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Equal", reflect.TypeOf((*MockMap)(nil).Equal), value)
}
// Get mocks base method
func (m *MockMap) Get(arg0 ident.ID) (Metadata, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", arg0)
ret0, _ := ret[0].(Metadata)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Get indicates an expected call of Get
func (mr *MockMapMockRecorder) Get(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMap)(nil).Get), arg0)
}
// IDs mocks base method
func (m *MockMap) IDs() []ident.ID {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IDs")
ret0, _ := ret[0].([]ident.ID)
return ret0
}
// IDs indicates an expected call of IDs
func (mr *MockMapMockRecorder) IDs() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IDs", reflect.TypeOf((*MockMap)(nil).IDs))
}
// Metadatas mocks base method
func (m *MockMap) Metadatas() []Metadata {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Metadatas")
ret0, _ := ret[0].([]Metadata)
return ret0
}
// Metadatas indicates an expected call of Metadatas
func (mr *MockMapMockRecorder) Metadatas() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Metadatas", reflect.TypeOf((*MockMap)(nil).Metadatas))
}
// MockWatch is a mock of Watch interface
type MockWatch struct {
ctrl *gomock.Controller
recorder *MockWatchMockRecorder
}
// MockWatchMockRecorder is the mock recorder for MockWatch
type MockWatchMockRecorder struct {
mock *MockWatch
}
// NewMockWatch creates a new mock instance
func NewMockWatch(ctrl *gomock.Controller) *MockWatch {
mock := &MockWatch{ctrl: ctrl}
mock.recorder = &MockWatchMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockWatch) EXPECT() *MockWatchMockRecorder {
return m.recorder
}
// C mocks base method
func (m *MockWatch) C() <-chan struct{} {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "C")
ret0, _ := ret[0].(<-chan struct{})
return ret0
}
// C indicates an expected call of C
func (mr *MockWatchMockRecorder) C() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "C", reflect.TypeOf((*MockWatch)(nil).C))
}
// Get mocks base method
func (m *MockWatch) Get() Map {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get")
ret0, _ := ret[0].(Map)
return ret0
}
// Get indicates an expected call of Get
func (mr *MockWatchMockRecorder) Get() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockWatch)(nil).Get))
}
// Close mocks base method
func (m *MockWatch) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
return ret0
}
// Close indicates an expected call of Close
func (mr *MockWatchMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockWatch)(nil).Close))
}
// MockRegistry is a mock of Registry interface
type MockRegistry struct {
ctrl *gomock.Controller
recorder *MockRegistryMockRecorder
}
// MockRegistryMockRecorder is the mock recorder for MockRegistry
type MockRegistryMockRecorder struct {
mock *MockRegistry
}
// NewMockRegistry creates a new mock instance
func NewMockRegistry(ctrl *gomock.Controller) *MockRegistry {
mock := &MockRegistry{ctrl: ctrl}
mock.recorder = &MockRegistryMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockRegistry) EXPECT() *MockRegistryMockRecorder {
return m.recorder
}
// Watch mocks base method
func (m *MockRegistry) Watch() (Watch, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Watch")
ret0, _ := ret[0].(Watch)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Watch indicates an expected call of Watch
func (mr *MockRegistryMockRecorder) Watch() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockRegistry)(nil).Watch))
}
// Close mocks base method
func (m *MockRegistry) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
return ret0
}
// Close indicates an expected call of Close
func (mr *MockRegistryMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockRegistry)(nil).Close))
}
// MockInitializer is a mock of Initializer interface
type MockInitializer struct {
ctrl *gomock.Controller
recorder *MockInitializerMockRecorder
}
// MockInitializerMockRecorder is the mock recorder for MockInitializer
type MockInitializerMockRecorder struct {
mock *MockInitializer
}
// NewMockInitializer creates a new mock instance
func NewMockInitializer(ctrl *gomock.Controller) *MockInitializer {
mock := &MockInitializer{ctrl: ctrl}
mock.recorder = &MockInitializerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockInitializer) EXPECT() *MockInitializerMockRecorder {
return m.recorder
}
// Init mocks base method
func (m *MockInitializer) Init() (Registry, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Init")
ret0, _ := ret[0].(Registry)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Init indicates an expected call of Init
func (mr *MockInitializerMockRecorder) Init() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockInitializer)(nil).Init))
}
// MockDynamicOptions is a mock of DynamicOptions interface
type MockDynamicOptions struct {
ctrl *gomock.Controller
recorder *MockDynamicOptionsMockRecorder
}
// MockDynamicOptionsMockRecorder is the mock recorder for MockDynamicOptions
type MockDynamicOptionsMockRecorder struct {
mock *MockDynamicOptions
}
// NewMockDynamicOptions creates a new mock instance
func NewMockDynamicOptions(ctrl *gomock.Controller) *MockDynamicOptions {
mock := &MockDynamicOptions{ctrl: ctrl}
mock.recorder = &MockDynamicOptionsMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockDynamicOptions) EXPECT() *MockDynamicOptionsMockRecorder {
return m.recorder
}
// Validate mocks base method
func (m *MockDynamicOptions) Validate() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Validate")
ret0, _ := ret[0].(error)
return ret0
}
// Validate indicates an expected call of Validate
func (mr *MockDynamicOptionsMockRecorder) Validate() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockDynamicOptions)(nil).Validate))
}
// SetInstrumentOptions mocks base method
func (m *MockDynamicOptions) SetInstrumentOptions(value instrument.Options) DynamicOptions {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetInstrumentOptions", value)
ret0, _ := ret[0].(DynamicOptions)
return ret0
}
// SetInstrumentOptions indicates an expected call of SetInstrumentOptions
func (mr *MockDynamicOptionsMockRecorder) SetInstrumentOptions(value interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInstrumentOptions", reflect.TypeOf((*MockDynamicOptions)(nil).SetInstrumentOptions), value)
}
// InstrumentOptions mocks base method
func (m *MockDynamicOptions) InstrumentOptions() instrument.Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstrumentOptions")
ret0, _ := ret[0].(instrument.Options)
return ret0
}
// InstrumentOptions indicates an expected call of InstrumentOptions
func (mr *MockDynamicOptionsMockRecorder) InstrumentOptions() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstrumentOptions", reflect.TypeOf((*MockDynamicOptions)(nil).InstrumentOptions))
}
// SetConfigServiceClient mocks base method
func (m *MockDynamicOptions) SetConfigServiceClient(c client.Client) DynamicOptions {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetConfigServiceClient", c)
ret0, _ := ret[0].(DynamicOptions)
return ret0
}
// SetConfigServiceClient indicates an expected call of SetConfigServiceClient
func (mr *MockDynamicOptionsMockRecorder) SetConfigServiceClient(c interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetConfigServiceClient", reflect.TypeOf((*MockDynamicOptions)(nil).SetConfigServiceClient), c)
}
// ConfigServiceClient mocks base method
func (m *MockDynamicOptions) ConfigServiceClient() client.Client {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ConfigServiceClient")
ret0, _ := ret[0].(client.Client)
return ret0
}
// ConfigServiceClient indicates an expected call of ConfigServiceClient
func (mr *MockDynamicOptionsMockRecorder) ConfigServiceClient() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigServiceClient", reflect.TypeOf((*MockDynamicOptions)(nil).ConfigServiceClient))
}
// SetNamespaceRegistryKey mocks base method
func (m *MockDynamicOptions) SetNamespaceRegistryKey(k string) DynamicOptions {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetNamespaceRegistryKey", k)
ret0, _ := ret[0].(DynamicOptions)
return ret0
}
// SetNamespaceRegistryKey indicates an expected call of SetNamespaceRegistryKey
func (mr *MockDynamicOptionsMockRecorder) SetNamespaceRegistryKey(k interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNamespaceRegistryKey", reflect.TypeOf((*MockDynamicOptions)(nil).SetNamespaceRegistryKey), k)
}
// NamespaceRegistryKey mocks base method
func (m *MockDynamicOptions) NamespaceRegistryKey() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NamespaceRegistryKey")
ret0, _ := ret[0].(string)
return ret0
}
// NamespaceRegistryKey indicates an expected call of NamespaceRegistryKey
func (mr *MockDynamicOptionsMockRecorder) NamespaceRegistryKey() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NamespaceRegistryKey", reflect.TypeOf((*MockDynamicOptions)(nil).NamespaceRegistryKey))
}
reader.go
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package file
import (
"bufio"
"context"
"fmt"
"io"
"os"
"path/filepath"
"go.uber.org/zap"
"golang.org/x/text/encoding"
"golang.org/x/text/transform"
"github.com/open-telemetry/opentelemetry-log-collection/errors"
)
// Reader manages a single file
type Reader struct {
Fingerprint *Fingerprint
Offset int64
Path string
generation int
fileInput *InputOperator
file *os.File
decoder *encoding.Decoder
decodeBuffer []byte
*zap.SugaredLogger `json:"-"`
}
// NewReader creates a new file reader
func (f *InputOperator) NewReader(path string, file *os.File, fp *Fingerprint) (*Reader, error) {
r := &Reader{
Fingerprint: fp,
file: file,
Path: path,
fileInput: f,
SugaredLogger: f.SugaredLogger.With("path", path),
decoder: f.encoding.NewDecoder(),
decodeBuffer: make([]byte, 1<<12),
}
return r, nil
}
// Copy creates a deep copy of a Reader
func (f *Reader) Copy(file *os.File) (*Reader, error) {
reader, err := f.fileInput.NewReader(f.Path, file, f.Fingerprint.Copy())
if err != nil {
return nil, err
}
reader.Offset = f.Offset
return reader, nil
}
// InitializeOffset sets the starting offset
func (f *Reader) InitializeOffset(startAtBeginning bool) error {
if !startAtBeginning {
info, err := f.file.Stat()
if err != nil {
return fmt.Errorf("stat: %s", err)
}
f.Offset = info.Size()
}
return nil
}
// ReadToEnd will read until the end of the file
func (f *Reader) ReadToEnd(ctx context.Context) {
defer f.file.Close()
if _, err := f.file.Seek(f.Offset, 0); err != nil {
f.Errorw("Failed to seek", zap.Error(err))
return
}
fr := NewFingerprintUpdatingReader(f.file, f.Offset, f.Fingerprint, f.fileInput.fingerprintSize)
scanner := NewPositionalScanner(fr, f.fileInput.MaxLogSize, f.Offset, f.fileInput.SplitFunc)
// Iterate over the tokenized file, emitting entries as we go
for {
select {
case <-ctx.Done():
return
default:
}
ok := scanner.Scan()
if !ok {
if err := getScannerError(scanner); err != nil {
f.Errorw("Failed during scan", zap.Error(err))
}
break
}
		if err := f.emit(ctx, scanner.Bytes()); err != nil {
			f.Error("Failed to emit entry", zap.Error(err))
		}
f.Offset = scanner.Pos()
}
}
// Emit creates an entry with the decoded message and sends it to the next
// operator in the pipeline
func (f *Reader) emit(ctx context.Context, msgBuf []byte) error {
// Skip the entry if it's empty
if len(msgBuf) == 0 {
return nil
}
msg, err := f.decode(msgBuf)
if err != nil {
return fmt.Errorf("decode: %s", err)
}
e, err := f.fileInput.NewEntry(msg)
if err != nil {
return fmt.Errorf("create entry: %s", err)
}
if err := e.Set(f.fileInput.FilePathField, f.Path); err != nil {
return err
}
if err := e.Set(f.fileInput.FileNameField, filepath.Base(f.Path)); err != nil {
return err
}
f.fileInput.Write(ctx, e)
return nil
}
// decode converts the bytes in msgBuf to utf-8 from the configured encoding
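// When the decoder reports transform.ErrShortDst, the destination buffer was
// too small; decode doubles decodeBuffer and retries until the message fits.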
func (f *Reader) decode(msgBuf []byte) (string, error) {
for {
f.decoder.Reset()
nDst, _, err := f.decoder.Transform(f.decodeBuffer, msgBuf, true)
if err != nil && err == transform.ErrShortDst {
f.decodeBuffer = make([]byte, len(f.decodeBuffer)*2)
continue
} else if err != nil {
return "", fmt.Errorf("transform encoding: %s", err)
}
return string(f.decodeBuffer[:nDst]), nil
}
}
func getScannerError(scanner *PositionalScanner) error {
err := scanner.Err()
if err == bufio.ErrTooLong {
return errors.NewError("log entry too large", "increase max_log_size or ensure that multiline regex patterns terminate")
} else if err != nil {
return errors.Wrap(err, "scanner error")
}
return nil
}
// NewFingerprintUpdatingReader creates a new FingerprintUpdatingReader starting at the given offset
func NewFingerprintUpdatingReader(r io.Reader, offset int64, f *Fingerprint, fingerprintSize int) *FingerprintUpdatingReader {
return &FingerprintUpdatingReader{
fingerprint: f,
fingerprintSize: fingerprintSize,
reader: r,
offset: offset,
}
}
// FingerprintUpdatingReader wraps another reader, and updates the fingerprint
// with each read in the first fingerPrintSize bytes
type FingerprintUpdatingReader struct {
fingerprint *Fingerprint
fingerprintSize int
reader io.Reader
offset int64
}
// Read reads from the wrapped reader, saving the read bytes to the fingerprint
func (f *FingerprintUpdatingReader) Read(dst []byte) (int, error) {
if len(f.fingerprint.FirstBytes) == f.fingerprintSize {
return f.reader.Read(dst)
}
n, err := f.reader.Read(dst)
appendCount := min0(n, f.fingerprintSize-int(f.offset))
f.fingerprint.FirstBytes = append(f.fingerprint.FirstBytes[:f.offset], dst[:appendCount]...)
f.offset += int64(n)
return n, err
}
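// min0 returns the smaller of a and b, or 0 if either is negative.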
func min0(a, b int) int {
if a < 0 || b < 0 {
return 0
}
if a < b {
return a
}
return b
}
options.go
package nsqlookupd
import (
"log"
"os"
"time"
)
type nsqlookupdOptions struct {
Verbose bool `flag:"verbose"`
TCPAddress string `flag:"tcp-address"`
HTTPAddress string `flag:"http-address"`
TCPBroadcastAddress string `flag:"tcp-broadcast-address"`
HTTPBroadcastAddress string `flag:"http-broadcast-address"`
InactiveProducerTimeout time.Duration `flag:"inactive-producer-timeout"`
TombstoneLifetime time.Duration `flag:"tombstone-lifetime"`
Logger logger
}
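// NewNSQLookupdOptions returns an options struct populated with defaults;
// the local hostname seeds the TCP and HTTP broadcast addresses.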
func NewNSQLookupdOptions() *nsqlookupdOptions {
hostname, err := os.Hostname()
if err != nil {
log.Fatal(err)
}
return &nsqlookupdOptions{
TCPAddress: "0.0.0.0:4160",
HTTPAddress: "0.0.0.0:4161",
TCPBroadcastAddress: hostname,
HTTPBroadcastAddress: hostname,
InactiveProducerTimeout: 300 * time.Second,
TombstoneLifetime: 45 * time.Second,
Logger: log.New(os.Stderr, "[nsqlookupd] ", log.Ldate|log.Ltime|log.Lmicroseconds),
}
}
main.go
package main
import (
"errors"
"fmt"
"google.golang.org/grpc"
"log"
"net"
"os"
"os/signal"
"runtime/debug"
"syscall"
"telegram_boxes/services/core/app"
"telegram_boxes/services/core/app/admin"
"telegram_boxes/services/core/app/box"
"telegram_boxes/services/core/app/db"
slog "telegram_boxes/services/core/app/log"
"telegram_boxes/services/core/protobuf"
)
func main() {
logger, errLogger := slog.CreateLogger(os.Getenv("LOGS_HOST"), os.Getenv("LOGS_PORT"))
if errLogger != nil {
log.Fatal(errLogger)
return
}
dbConnect, err := db.InitDatabaseConnect(
os.Getenv("MONGO_HOST"), os.Getenv("MONGO_PORT"),
os.Getenv("MONGO_USERNAME"), os.Getenv("MONGO_PASSWORD"),
os.Getenv("MONGO_DATABASE"), os.Getenv("MONGO_MECHANISM"),
)
if err != nil {
_ = logger.System(err.Error())
return
}
defer dbConnect.Close()
adminClient, errAdmin := admin.CreateClient(os.Getenv("ADMIN_HOST"), os.Getenv("ADMIN_PORT"))
if errAdmin != nil {
_ = logger.System(errAdmin.Error())
return
}
//Create new server
s := protobuf.CreateServer(dbConnect, logger, adminClient, box.CreateClients(dbConnect))
//
defer recovery(s.Log())
go waitForShutdown(s)
lis, errCreateConn := net.Listen("tcp", fmt.Sprintf(":%s", os.Getenv("CORE_PORT")))
if errCreateConn != nil {
_ = logger.System(fmt.Sprintf("failed to listen: %v", err))
return
}
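// Attach the logger as a unary interceptor so every RPC call is logged.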
GRPCServer := grpc.NewServer(
grpc.UnaryInterceptor(logger.Interceptor),
)
protobuf.RegisterServersServer(GRPCServer, s)
protobuf.RegisterTasksServer(GRPCServer, s)
_ = logger.System(fmt.Sprintf("Protobuf CORE started on :%s", os.Getenv("CORE_PORT")))
err = GRPCServer.Serve(lis)
if err != nil {
_ = logger.System(fmt.Sprintf("failed to serve: %s" + err.Error()))
}
return
}
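// waitForShutdown blocks until SIGINT or SIGTERM is received, marks every
// bot as fatal in the database, notifies the admin service, and exits.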
func waitForShutdown(b protobuf.MainServer) {
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
// Block until we receive our signal.
<-interruptChan
session := b.DB().GetMainSession().Clone()
defer session.Close()
servers, _ := b.DB().Models().Bots().GetAll(session)
for _, s := range servers {
s.Status = app.StatusFatal.String()
b.DB().Models().Bots().UpdateBot(s, session)
_ = b.Admin().SendError(s.Status, s.UserName, "Core shutdown")
}
os.Exit(0)
}
// recovery recovers the application from a panic and logs the stack trace
func recovery(l slog.Client) {
var err error
r := recover()
if r != nil {
switch t := r.(type) {
case string:
err = errors.New(t)
case error:
err = t
default:
err = errors.New("Unknown error ")
}
_ = l.System("RECOVERY :" + err.Error() + "\n" + string(debug.Stack()))
}
}
$.tsx
import { ButtonGroup } from '@trussworks/react-uswds'
import type { LoaderFunction } from '@remix-run/node'
import { Link } from '@remix-run/react'
export const loader: LoaderFunction = function () {
throw new Response(null, { status: 404 })
}
export function CatchBoundary() {
return (
<>
<h1>Error 404: Page not found</h1>
<p className="usa-intro">
We're sorry, we can't find the page you're looking for. It might have
been removed, changed its name, or is otherwise unavailable.
</p>
<p>
Visit our homepage for helpful tools and resources, or contact us and
we'll point you in the right direction.
</p>
<ButtonGroup>
<Link to="/" className="usa-button">
Visit homepage
</Link>
<a
href="https://heasarc.gsfc.nasa.gov/cgi-bin/Feedback?selected=kafkagcn"
className="usa-button usa-button--outline"
>
Contact us
</a>
</ButtonGroup>
</>
)
}
export default function () {}
bitcoin_ky.ts
<?xml version="1.0" ?><!DOCTYPE TS><TS language="ky" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Mavro</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+39"/>
<source><b>Mavro</b> version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+41"/>
<source>Copyright ยฉ 2009-2014 The Bitcoin developers
Copyright ยฉ 2012-2014 The NovaCoin developers
Copyright ยฉ 2014 The Mavro developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>ะะฐาฃ ะดะฐัะตะบัะธ ะถะฐัะพะพ</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-46"/>
<source>These are your Mavro addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<source>&Copy Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Mavro address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-14"/>
<source>Verify a message to ensure it was signed with a specified Mavro address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>ำจ&ัาฏัาฏาฏ</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+65"/>
<source>Copy &Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Edit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+250"/>
<source>Export Address Book Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>ะะฐัะตะบ</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(ะฐัั ะถะพะบ)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+35"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-58"/>
<source>Mavro will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+282"/>
<source>Sign &message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+251"/>
<source>Synchronizing with network...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-319"/>
<source>&Overview</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&ะขัะฐะฝะทะฐะบัะธัะปะฐั</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-13"/>
<source>&Receive coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-7"/>
<source>&Send coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>E&xit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Show information about Mavro</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+259"/>
<source>~%n block(s) remaining</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-256"/>
<source>&Export...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-64"/>
<source>Send coins to a Mavro address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+47"/>
<source>Modify configuration options for Mavro</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-14"/>
<source>Encrypt or decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup wallet to another location</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation>ะะธะปะดะธัาฏาฏะฝาฏ &ัะตะบัะตัาฏาฏ...</translation>
</message>
<message>
<location line="-202"/>
<source>Mavro</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet</source>
<translation>ะะฐะฟััะบ</translation>
</message>
<message>
<location line="+180"/>
<source>&About Mavro</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Unlock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>&File</source>
<translation>&ะคะฐะนะป</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>&ะะฐัะดะฐะผ</translation>
</message>
<message>
<location line="+12"/>
<source>Tabs toolbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Actions toolbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+9"/>
<source>[testnet]</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<location line="+60"/>
<source>Mavro client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+75"/>
<source>%n active connection(s) to Mavro network</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+40"/>
<source>Downloaded %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+413"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-403"/>
<source>%n second(s) ago</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="-312"/>
<source>About Mavro card</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show information about Mavro card</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>&Unlock Wallet...</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+297"/>
<source>%n minute(s) ago</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s) ago</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s) ago</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Up to date</source>
<translation>ะะฐาฃัะปะฐะฝะณะฐะฝ</translation>
</message>
<message>
<location line="+7"/>
<source>Catching up...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Last received block was generated %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid Mavro address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+76"/>
<source>%n second(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+18"/>
<source>Not staking</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="+109"/>
<source>A fatal error occurred. Mavro can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+90"/>
<source>Network Alert</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+551"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>ะะฐัะตะบ</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>ะะฐัะฐ</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-515"/>
<source>Copy address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+155"/>
<source>DUST</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(ะฐัั ะถะพะบ)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&ะะฐัะตะบ</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+20"/>
<source>New receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Mavro address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+420"/>
<location line="+12"/>
<source>Mavro-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start Mavro after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start Mavro on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Detach databases at shutdown</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>&ะขะฐัะผะฐะบ</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Mavro client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Connect to the Mavro network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&ะะพัั:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&ะขะตัะตะทะต</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Mavro.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Whether to show Mavro addresses in the transaction list or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Whether to show coin control features or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&ะะฐัะฐะนั</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&ะะพะบะบะพ ััะณะฐััั</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+55"/>
<source>default</source>
<translation>ะถะฐัััะปะฐะฝะฑะฐะณะฐะฝ</translation>
</message>
<message>
<location line="+149"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Mavro.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+33"/>
<location line="+231"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Mavro network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-160"/>
<source>Stake:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-107"/>
<source>Wallet</source>
<translation>ะะฐะฟััะบ</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>Immature:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Total:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation type="unfinished"/>
</message>
<message>
<location line="-108"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+113"/>
<location line="+1"/>
<source>out of sync</source>
<translation>ัะธะฝั
ัะพะฝะดะพัััััะปะณะฐะฝ ัะผะตั</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+348"/>
<source>N/A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&ะััั</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the Mavro-Qt help message to get a list with possible Mavro command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&ะะพะฝัะพะปั</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-104"/>
<source>Mavro - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Mavro Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Open the Mavro debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>ะะพะฝัะพะปะดั ัะฐะทะฐะปะพะพ</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-33"/>
<source>Welcome to the Mavro RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<location line="+86"/>
<location line="+86"/>
<location line="+32"/>
<source>0.00 hack</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-191"/>
<source>Priority:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>&ะะฐัะดัะณัะฝ ัะฐะทะฐะปะพะพ</translation>
</message>
<message>
<location line="+28"/>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>123.456 hack</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&ะำฉะฝำฉัาฏาฏ</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-173"/>
<source>Enter a Mavro address (e.g. BinCoinfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+86"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+251"/>
<source>WARNING: Invalid Mavro address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(ะฐัั ะถะพะบ)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<location filename="../sendcoinsentry.cpp" line="+25"/>
<source>Enter a label for this address to add it to your address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>The address to send the payment to (e.g. BinCoinfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>ะะฐัะตะบัะธ ะฐะปะผะฐััั ะฑััะตัะธะฝะตะฝ ะบะพัั</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Mavro address (e.g. BinCoinfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. BinCoinfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>ะะฐัะตะบัะธ ะฐะปะผะฐััั ะฑััะตัะธะฝะตะฝ ะบะพัั</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Mavro address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>&ะะฐัะดัะณัะฝ ัะฐะทะฐะปะพะพ</translation>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. BinCoinfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Mavro address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Mavro address (e.g. BinCoinfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Enter Mavro signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+19"/>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-2"/>
<source>Open for %n block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+8"/>
<source>conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation>%1/ัะฐัะผะฐะบัะฐ ัะผะตั</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>ะะฐัะฐ</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>ะะธะปะดะธัาฏาฏ</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-211"/>
<source>, has not been successfully broadcast yet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>unknown</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+226"/>
<source>Date</source>
<translation>ะะฐัะฐ</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>ะะฐัะตะบ</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+190"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+55"/>
<location line="+16"/>
<source>All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+144"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>ะะฐัะฐ</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>ะะฐัะตะบ</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+206"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+33"/>
<source>Mavro version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or BinCoind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: Mavro.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: BinCoind.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Listen for connections on <port> (default: 15714 or testnet: 25714)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Stake your coins to support network and gain reward (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+51"/>
<source>Detach block and address databases. Increases shutdown time (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+109"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<source>Listen for JSON-RPC connections on <port> (default: 15715 or testnet: 25715)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-11"/>
<source>Accept command line and JSON-RPC commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+101"/>
<source>Error: Transaction creation failed </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>Error: Wallet locked, unable to create transaction </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-8"/>
<source>Importing blockchain data file.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Importing bootstrap blockchain data file.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-88"/>
<source>Run in the background as a daemon and accept commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-24"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-38"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+117"/>
<source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-20"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+61"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Mavro will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-31"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-18"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-30"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Block creation options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-62"/>
<source>Connect only to the specified node(s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+94"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-90"/>
<source>Find peers using DNS lookup (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Sync checkpoints policy (default: strict)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+83"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation type="unfinished"/>
</message>
<message>
<location line="-82"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-74"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+41"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-42"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+109"/>
<source>Unable to sign checkpoint, wrong checkpointkey?
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-80"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<source>Username for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+47"/>
<source>Verifying database integrity...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+57"/>
<source>WARNING: syncronized checkpoint violation detected, but skipped!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-48"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-54"/>
<source>Password for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-84"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=BinCoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Mavro Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+51"/>
<source>Find peers using internet relay chat (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 2500, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+53"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-158"/>
<source>This help message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+95"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot obtain a lock on data directory %s. Mavro is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-98"/>
<source>Mavro</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+140"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-130"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+122"/>
<source>Loading addresses...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<source>Error loading blkindex.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of Mavro</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart Mavro to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-16"/>
<source>Invalid -proxy address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-24"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<source>Error: could not start node</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-34"/>
<source>Loading block index...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-103"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+122"/>
<source>Unable to bind to %s on this computer. Mavro is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-97"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+55"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Loading wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot initialize keypool</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Done loading</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-167"/>
<source>To use the %s option</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Error</source>
<translation>ะะฐัะฐ</translation>
</message>
<message>
<location line="+6"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation type="unfinished"/>
</message>
</context>
</TS>
|
<message>
|
typeFromAST.ts
|
import {
DefinitionNode,
EnumTypeDefinitionNode,
FieldDefinitionNode,
GraphQLEnumType,
GraphQLInputObjectType,
GraphQLInterfaceType,
GraphQLNamedType,
GraphQLObjectType,
GraphQLScalarType,
GraphQLUnionType,
InputObjectTypeDefinitionNode,
InputValueDefinitionNode,
InterfaceTypeDefinitionNode,
Kind,
ObjectTypeDefinitionNode,
ScalarTypeDefinitionNode,
UnionTypeDefinitionNode,
GraphQLDirective,
DirectiveDefinitionNode,
DirectiveLocationEnum,
DirectiveLocation,
GraphQLFieldConfig,
StringValueNode,
Location,
TokenKind,
GraphQLEnumValueConfigMap,
GraphQLFieldConfigArgumentMap,
valueFromASTUntyped,
EnumValueDefinitionNode,
getDirectiveValues,
GraphQLDeprecatedDirective,
} from 'graphql';
import { createStub, createNamedStub, Maybe } from '@graphql-tools/utils';
const backcompatOptions = { commentDescriptions: true };
export default function typeFromAST(node: DefinitionNode): GraphQLNamedType | GraphQLDirective | null {
switch (node.kind) {
case Kind.OBJECT_TYPE_DEFINITION:
return makeObjectType(node);
case Kind.INTERFACE_TYPE_DEFINITION:
return makeInterfaceType(node);
case Kind.ENUM_TYPE_DEFINITION:
return makeEnumType(node);
case Kind.UNION_TYPE_DEFINITION:
return makeUnionType(node);
case Kind.SCALAR_TYPE_DEFINITION:
return makeScalarType(node);
case Kind.INPUT_OBJECT_TYPE_DEFINITION:
return makeInputObjectType(node);
case Kind.DIRECTIVE_DEFINITION:
return makeDirective(node);
default:
return null;
}
}
function makeObjectType(node: ObjectTypeDefinitionNode): GraphQLObjectType {
const config = {
name: node.name.value,
description: getDescription(node, backcompatOptions),
interfaces: () => node.interfaces?.map(iface => createNamedStub(iface.name.value, 'interface')),
fields: () => (node.fields != null ? makeFields(node.fields) : {}),
astNode: node,
};
return new GraphQLObjectType(config);
}
function makeInterfaceType(node: InterfaceTypeDefinitionNode): GraphQLInterfaceType {
const config = {
name: node.name.value,
description: getDescription(node, backcompatOptions),
interfaces: (node as unknown as ObjectTypeDefinitionNode).interfaces?.map(iface =>
createNamedStub(iface.name.value, 'interface')
),
fields: () => (node.fields != null ? makeFields(node.fields) : {}),
astNode: node,
};
return new GraphQLInterfaceType(config);
}
function makeEnumType(node: EnumTypeDefinitionNode): GraphQLEnumType {
const values =
node.values?.reduce<GraphQLEnumValueConfigMap>(
(prev, value) => ({
...prev,
[value.name.value]: {
description: getDescription(value, backcompatOptions),
deprecationReason: getDeprecationReason(value),
astNode: value,
},
}),
{}
) ?? {};
return new GraphQLEnumType({
name: node.name.value,
description: getDescription(node, backcompatOptions),
values,
astNode: node,
});
}
function makeUnionType(node: UnionTypeDefinitionNode): GraphQLUnionType {
return new GraphQLUnionType({
name: node.name.value,
description: getDescription(node, backcompatOptions),
types: () => node.types?.map(type => createNamedStub(type.name.value, 'object')) ?? [],
astNode: node,
});
}
function makeScalarType(node: ScalarTypeDefinitionNode): GraphQLScalarType {
return new GraphQLScalarType({
name: node.name.value,
description: getDescription(node, backcompatOptions),
astNode: node,
// TODO: serialize default property setting can be dropped once
// upstream graphql-js TypeScript typings are updated, likely in v16
serialize: value => value,
});
}
function makeInputObjectType(node: InputObjectTypeDefinitionNode): GraphQLInputObjectType {
return new GraphQLInputObjectType({
name: node.name.value,
description: getDescription(node, backcompatOptions),
fields: () => (node.fields ? makeValues(node.fields) : {}),
astNode: node,
});
}
function makeFields(nodes: ReadonlyArray<FieldDefinitionNode>): Record<string, GraphQLFieldConfig<any, any>> {
return nodes.reduce(
(prev, node) => ({
...prev,
[node.name.value]: {
type: createStub(node.type, 'output'),
description: getDescription(node, backcompatOptions),
args: makeValues(node.arguments ?? []),
deprecationReason: getDeprecationReason(node),
astNode: node,
},
}),
{}
);
}
function makeValues(nodes: ReadonlyArray<InputValueDefinitionNode>): GraphQLFieldConfigArgumentMap {
return nodes.reduce(
(prev, node) => ({
...prev,
[node.name.value]: {
type: createStub(node.type, 'input'),
defaultValue: node.defaultValue !== undefined ? valueFromASTUntyped(node.defaultValue) : undefined,
description: getDescription(node, backcompatOptions),
astNode: node,
},
}),
{}
);
}
function makeDirective(node: DirectiveDefinitionNode): GraphQLDirective {
const locations: Array<DirectiveLocationEnum> = [];
for (const location of node.locations) {
if (location.value in DirectiveLocation) {
locations.push(location.value as DirectiveLocationEnum);
}
}
return new GraphQLDirective({
name: node.name.value,
description: node.description != null ? node.description.value : null,
locations,
isRepeatable: node.repeatable,
args: makeValues(node.arguments ?? []),
astNode: node,
});
}
// graphql < v13 does not export getDescription
function getDescription(
node: { description?: StringValueNode; loc?: Location },
options?: { commentDescriptions?: boolean }
): string | undefined {
if (node.description != null) {
return node.description.value;
}
if (options?.commentDescriptions) {
const rawValue = getLeadingCommentBlock(node);
if (rawValue !== undefined) {
return dedentBlockStringValue(`\n${rawValue as string}`);
}
}
}
function
|
(node: { description?: StringValueNode; loc?: Location }): void | string {
const loc = node.loc;
if (!loc) {
return;
}
const comments = [];
let token = loc.startToken.prev;
while (
token != null &&
token.kind === TokenKind.COMMENT &&
token.next != null &&
token.prev != null &&
token.line + 1 === token.next.line &&
token.line !== token.prev.line
) {
const value = String(token.value);
comments.push(value);
token = token.prev;
}
return comments.length > 0 ? comments.reverse().join('\n') : undefined;
}
function dedentBlockStringValue(rawString: string): string {
// Expand a block string's raw value into independent lines.
const lines = rawString.split(/\r\n|[\n\r]/g);
// Remove common indentation from all lines but first.
const commonIndent = getBlockStringIndentation(lines);
if (commonIndent !== 0) {
for (let i = 1; i < lines.length; i++) {
lines[i] = lines[i].slice(commonIndent);
}
}
// Remove leading and trailing blank lines.
while (lines.length > 0 && isBlank(lines[0])) {
lines.shift();
}
while (lines.length > 0 && isBlank(lines[lines.length - 1])) {
lines.pop();
}
// Return a string of the lines joined with U+000A.
return lines.join('\n');
}
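// Illustrative only (not part of the original file): tracing the helpers below,
//   dedentBlockStringValue('\n  a\n    b\n')
// finds a common indent of 2 on the non-first lines, strips it, and trims the
// blank first and last lines, returning 'a\n  b'.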
/**
* @internal
*/
export function getBlockStringIndentation(lines: ReadonlyArray<string>): number {
let commonIndent = null;
for (let i = 1; i < lines.length; i++) {
const line = lines[i];
const indent = leadingWhitespace(line);
if (indent === line.length) {
continue; // skip empty lines
}
if (commonIndent === null || indent < commonIndent) {
commonIndent = indent;
if (commonIndent === 0) {
break;
}
}
}
return commonIndent === null ? 0 : commonIndent;
}
function leadingWhitespace(str: string) {
let i = 0;
while (i < str.length && (str[i] === ' ' || str[i] === '\t')) {
i++;
}
return i;
}
function isBlank(str: string) {
return leadingWhitespace(str) === str.length;
}
function getDeprecationReason(node: EnumValueDefinitionNode | FieldDefinitionNode): Maybe<string> {
const deprecated = getDirectiveValues(GraphQLDeprecatedDirective, node);
return deprecated?.['reason'];
}
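// Illustrative only (not part of the original file): for a field defined as
//   oldField: String @deprecated(reason: "Use newField.")
// getDirectiveValues returns { reason: 'Use newField.' }, so this helper
// yields the string 'Use newField.'.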
|
getLeadingCommentBlock
|
data_utils.py
|
"""
This is the Data Loading Pipeline for Sentence Classifier Task from
https://github.com/google-research/bert/blob/master/run_classifier.py
"""
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
import collections
import sys
sys.path.append(os.path.dirname(__file__))
import tokenization
import tensorflow as tf
class InputExample:
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence.
For single sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second
                sequence. Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
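# Illustrative only (not part of the original file): a single-sentence training
# example would be constructed as
#     InputExample(guid="train-1", text_a="a funny and touching film", label="1")
# while sentence-pair tasks such as MRPC additionally set text_b.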
class InputFeatures:
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class SSTProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
if set_type == 'train' or set_type == 'dev':
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[0])
# Single sentence classification, text_b doesn't exist
text_b = None
label = tokenization.convert_to_unicode(line[1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
if set_type == 'test':
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[1])
# Single sentence classification, text_b doesn't exist
text_b = None
                label = '0'  # arbitrarily set to '0'; labels are unused at test time
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")),
"test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type,
tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")),
"train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")),
"test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")),
"train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")),
"test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def
|
(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
    # The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# segment_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
    # segment_ids: 0 0 0 0 0 0 0
#
# Where "segment_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
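    # Illustrative only (not part of the original file): with
    # max_seq_length == 8 and tokens_a == ["the", "dog"],
    #   tokens      == ["[CLS]", "the", "dog", "[SEP]"] before padding, so
    #   input_mask  == [1, 1, 1, 1, 0, 0, 0, 0] and segment_ids == [0] * 8
    # after the zero-padding loop above.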
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
# here we disable the verbose printing of the data
if ex_index < 0:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_ids length: %d" % len(input_ids))
tf.logging.info("input_mask: %s" %\
" ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" %\
" ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
return tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
tf_example = tf.train.Example(
features=tf.train.Features(feature=features))
        writer.write(tf_example.SerializeToString())
    writer.close()
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal
# percent of tokens from each, since if one sequence is very short then
# each token that's truncated likely contains more information than a
# longer sequence.
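    # Illustrative only (not part of the original file): with
    # len(tokens_a) == 10, len(tokens_b) == 4 and max_length == 8, six tokens
    # are popped from tokens_a, leaving a 4 + 4 split.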
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def prepare_TFRecord_data(processor, tokenizer,
data_dir, max_seq_length, output_dir):
"""
Args:
        processor: Data preprocessor, which must have get_labels and
            get_train/dev/test_examples methods defined.
tokenizer: The Sentence Tokenizer. Generally should be
SentencePiece Model.
data_dir: The input data directory.
        max_seq_length: Max sequence length.
        output_dir: The directory to save the TFRecord files in.
"""
label_list = processor.get_labels()
train_examples = processor.get_train_examples(data_dir)
train_file = os.path.join(output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, max_seq_length,
tokenizer, train_file)
eval_examples = processor.get_dev_examples(data_dir)
eval_file = os.path.join(output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list,
max_seq_length, tokenizer, eval_file)
test_examples = processor.get_test_examples(data_dir)
test_file = os.path.join(output_dir, "predict.tf_record")
file_based_convert_examples_to_features(
test_examples, label_list,
max_seq_length, tokenizer, test_file)
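# Usage sketch (hypothetical names): given a processor/tokenizer pair that
# satisfies the interface documented above, a caller would run, e.g.,
#   prepare_TFRecord_data(processor=MyProcessor(), tokenizer=tokenizer,
#                         data_dir="data/", max_seq_length=128,
#                         output_dir="records/")
# which writes train.tf_record, eval.tf_record and predict.tf_record into
# output_dir.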
|
_create_examples
|
scaffold-project-and-build.ts
|
/*
* Copyright by LunaSec (owned by Refinery Labs, Inc)
*
* Licensed under the Business Source License v1.1
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* https://github.com/lunasec-io/lunasec/blob/master/licenses/BSL-LunaTrace.txt
*
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
import { db } from '../database/db';
|
const existingProject = await db.oneOrNone(`
SELECT id, name FROM public.projects WHERE name = 'Automatic Project' LIMIT 1
`);
const project_id =
existingProject?.id ||
(await db.one(`INSERT INTO public.projects(name) VALUES ('Automatic Project') RETURNING id `)).id;
// INSERT INTO public.builds(project_id) VALUES ((INSERT INTO public.projects(name) VALUES ('Automatic Project') RETURNING id)) RETURNING id;
return (await db.one(`INSERT INTO public.builds(project_id) VALUES ('${project_id as string}') RETURNING id`))
.id as string;
}
|
export async function scaffoldBuild() {
console.log('scaffolding project with a build and scan');
|
carbonserver.go
|
/*
* Copyright 2013-2016 Fabian Groffen, Damian Gryski, Vladimir Smirnov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package carbonserver
import (
"bufio"
"compress/gzip"
"context"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math"
"net"
"net/http"
_ "net/http/pprof"
"os"
"path/filepath"
"regexp"
"runtime"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
prom "github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"github.com/NYTimes/gziphandler"
"github.com/dgryski/go-expirecache"
"github.com/dgryski/go-trigram"
"github.com/dgryski/httputil"
"github.com/go-graphite/go-carbon/helper"
"github.com/go-graphite/go-carbon/helper/stat"
"github.com/go-graphite/go-carbon/points"
protov3 "github.com/go-graphite/protocol/carbonapi_v3_pb"
"github.com/lomik/zapwriter"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
)
type metricStruct struct {
RenderRequests uint64
RenderErrors uint64
NotFound uint64
FindRequests uint64
FindErrors uint64
FindZero uint64
InfoRequests uint64
InfoErrors uint64
ListRequests uint64
ListErrors uint64
ListQueryRequests uint64
ListQueryErrors uint64
DetailsRequests uint64
DetailsErrors uint64
CacheHit uint64
CacheMiss uint64
CacheRequestsTotal uint64
CacheWorkTimeNS uint64
CacheWaitTimeFetchNS uint64
DiskWaitTimeNS uint64
DiskRequests uint64
PointsReturned uint64
MetricsReturned uint64
MetricsKnown uint64
FileScanTimeNS uint64
IndexBuildTimeNS uint64
MetricsFetched uint64
MetricsFound uint64
FetchSize uint64
QueryCacheHit uint64
QueryCacheMiss uint64
FindCacheHit uint64
FindCacheMiss uint64
TrieNodes uint64
TrieFiles uint64
TrieDirs uint64
TrieCountNodesTimeNs uint64
QuotaApplyTimeNs uint64
UsageRefreshTimeNs uint64
InflightRequests uint64
RejectedTooManyRequests uint64
}
type requestsTimes struct {
sync.RWMutex
list []int64
}
const (
QueryIsPending uint64 = 1 << iota
DataIsAvailable
)
type QueryItem struct {
Data atomic.Value
Flags uint64 // DataIsAvailable or QueryIsPending
QueryFinished chan struct{}
}
// type GlobResponse struct {
type ExpandedGlobResponse struct {
Name string
Files []string
Leafs []bool
Err error
}
var statusCodes = map[string][]uint64{
"combined": make([]uint64, 5),
"find": make([]uint64, 5),
"list": make([]uint64, 5),
"render": make([]uint64, 5),
"details": make([]uint64, 5),
"info": make([]uint64, 5),
"capabilities": make([]uint64, 5),
}
// interface to retrieve retention and aggregation
// schema from the persister.
type configRetriever interface {
MetricRetentionPeriod(string) (int, bool)
MetricAggrConf(string) (string, float64, bool)
}
type responseWriterWithStatus struct {
http.ResponseWriter
statusCode int
}
func (rw responseWriterWithStatus) statusCodeMajor() int {
return rw.statusCode/100 - 1
}
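// Example (illustrative): a 200 response yields statusCodeMajor() == 1, which
// the Stat sender reports as "request_codes.<handler>.2xx" (slice index i is
// emitted as (i+1)xx).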
func newResponseWriterWithStatus(w http.ResponseWriter) *responseWriterWithStatus {
return &responseWriterWithStatus{
w,
http.StatusOK,
}
}
func (w *responseWriterWithStatus) WriteHeader(code int) {
w.statusCode = code
w.ResponseWriter.WriteHeader(code)
}
func (q *QueryItem) FetchOrLock() (interface{}, bool) {
d := q.Data.Load()
if d != nil {
return d, true
}
ok := atomic.CompareAndSwapUint64(&q.Flags, 0, QueryIsPending)
if ok {
// We are the leader now and will be fetching the data
return nil, false
}
select { //nolint:gosimple
// TODO: Add timeout support
case <-q.QueryFinished:
break
}
return q.Data.Load(), true
}
func (q *QueryItem) StoreAbort() {
oldChan := q.QueryFinished
q.QueryFinished = make(chan struct{})
close(oldChan)
atomic.StoreUint64(&q.Flags, 0)
}
func (q *QueryItem) StoreAndUnlock(data interface{}) {
q.Data.Store(data)
atomic.StoreUint64(&q.Flags, DataIsAvailable)
close(q.QueryFinished)
}
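// Usage sketch (illustrative, not part of the original API surface): a
// typical caller pairs the three methods as
//
//	item := listener.queryCache.getQueryItem(key, size, expire)
//	if data, ok := item.FetchOrLock(); ok {
//		return data // follower: another goroutine already computed it
//	}
//	data, err := computeExpensiveResult() // hypothetical helper
//	if err != nil {
//		item.StoreAbort() // wake waiting followers so they can retry
//	} else {
//		item.StoreAndUnlock(data) // publish the result and wake followers
//	}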
type queryCache struct {
ec *expirecache.Cache
}
func (q *queryCache) getQueryItem(k string, size uint64, expire int32) *QueryItem {
emptyQueryItem := &QueryItem{QueryFinished: make(chan struct{})}
return q.ec.GetOrSet(k, emptyQueryItem, size, expire).(*QueryItem)
}
type CarbonserverListener struct {
helper.Stoppable
cacheGet func(key string) []points.Point
readTimeout time.Duration
idleTimeout time.Duration
writeTimeout time.Duration
requestTimeout time.Duration
whisperData string
buckets int
maxGlobs int
emptyResultOk bool
failOnMaxGlobs bool
percentiles []int
scanFrequency time.Duration
forceScanChan chan struct{}
metricsAsCounters bool
tcpListener *net.TCPListener
logger *zap.Logger
accessLogger *zap.Logger
internalStatsDir string
flock bool
compressed bool
removeEmptyFile bool
maxMetricsGlobbed int
maxMetricsRendered int
queryCacheEnabled bool
queryCacheSizeMB int
queryCache queryCache
findCacheEnabled bool
findCache queryCache
trigramIndex bool
trieIndex bool
concurrentIndex bool
fileListCache string
realtimeIndex int
newMetricsChan chan string
fileIdx atomic.Value
fileIdxMutex sync.Mutex
metrics *metricStruct
requestsTimes requestsTimes
exitChan chan struct{}
timeBuckets []uint64
cacheGetRecentMetrics func() []map[string]struct{}
whisperGetConfig configRetriever
prometheus prometheus
db *leveldb.DB
quotas []*Quota
estimateSize func(metric string) (size, dataPoints int64)
quotaAndUsageMetrics chan []points.Points
quotaUsageReportFrequency time.Duration
internalInfoCallbacks map[string]func() map[string]interface{}
// resource control
MaxInflightRequests uint64 // TODO: to deprecate
NoServiceWhenIndexIsNotReady bool
apiPerPathRatelimiter map[string]*ApiPerPathRatelimiter
globQueryRateLimiters []*GlobQueryRateLimiter
}
type prometheus struct {
enabled bool
requests *prom.CounterVec
request func(string, int)
durations prom.Histogram
duration func(time.Duration)
cacheRequests *prom.CounterVec
cacheRequest func(string, bool)
cacheDurations *prom.HistogramVec
cacheDuration func(string, time.Duration)
diskRequests prom.Counter
diskRequest func()
cancelledRequests prom.Counter
cancelledRequest func()
timeoutRequests prom.Counter
timeoutRequest func()
diskWaitDurations prom.Histogram
diskWaitDuration func(time.Duration)
returnedMetrics prom.Counter
returnedMetric func()
returnedPoints prom.Counter
returnedPoint func(int)
}
func (c *CarbonserverListener) InitPrometheus(reg prom.Registerer) {
c.prometheus = prometheus{
enabled: true,
requests: prom.NewCounterVec(
prom.CounterOpts{
Name: "http_requests_total",
Help: "How many HTTP requests processed, partitioned by status code and handler",
},
[]string{"code", "handler"},
),
cacheRequests: prom.NewCounterVec(
prom.CounterOpts{
Name: "cache_requests_total",
Help: "Cache counts, partitioned by type and hit/miss",
},
[]string{"type", "hit"},
),
durations: prom.NewHistogram(
prom.HistogramOpts{
Name: "http_request_duration_seconds_exp",
Help: "Duration of HTTP requests (exponential buckets)",
Buckets: prom.ExponentialBuckets(time.Millisecond.Seconds(), 2.0, 20),
},
),
cacheDurations: prom.NewHistogramVec(
prom.HistogramOpts{
Name: "cache_duration_seconds_exp",
Help: "Time spent in cache (exponential buckets)",
Buckets: prom.ExponentialBuckets(time.Millisecond.Seconds(), 2.0, 20),
},
[]string{"type"},
),
diskRequests: prom.NewCounter(prom.CounterOpts{
Name: "disk_requests_total",
Help: "Number of times disk has been hit",
}),
cancelledRequests: prom.NewCounter(prom.CounterOpts{
Name: "cancelled_requests_total",
Help: "Number of times a request has been cancelled",
}),
timeoutRequests: prom.NewCounter(prom.CounterOpts{
Name: "timeout_requests_total",
Help: "Number of times a request has been timeout",
}),
diskWaitDurations: prom.NewHistogram(
prom.HistogramOpts{
Name: "disk_wait_seconds_exp",
Help: "Duration of disk wait times (exponential buckets)",
Buckets: prom.ExponentialBuckets(time.Millisecond.Seconds(), 2.0, 20),
},
),
returnedMetrics: prom.NewCounter(prom.CounterOpts{
Name: "returned_metrics_total",
Help: "Number of metrics returned",
}),
returnedPoints: prom.NewCounter(prom.CounterOpts{
Name: "returned_points_total",
Help: "Number of points returned",
}),
}
c.prometheus.request = func(endpoint string, code int) {
c.prometheus.requests.WithLabelValues(strconv.Itoa(code), endpoint).Inc()
}
c.prometheus.cacheRequest = func(kind string, hit bool) {
c.prometheus.cacheRequests.WithLabelValues(kind, strconv.FormatBool(hit))
}
c.prometheus.duration = func(t time.Duration) {
c.prometheus.durations.Observe(t.Seconds())
}
c.prometheus.cacheDuration = func(kind string, t time.Duration) {
c.prometheus.cacheDurations.WithLabelValues(kind).Observe(t.Seconds())
}
c.prometheus.diskRequest = func() {
c.prometheus.diskRequests.Inc()
}
c.prometheus.cancelledRequest = func() {
c.prometheus.cancelledRequests.Inc()
}
c.prometheus.timeoutRequest = func() {
c.prometheus.timeoutRequests.Inc()
}
c.prometheus.diskWaitDuration = func(t time.Duration) {
c.prometheus.diskWaitDurations.Observe(t.Seconds())
}
c.prometheus.returnedMetric = func() {
c.prometheus.returnedMetrics.Inc()
}
c.prometheus.returnedPoint = func(i int) {
c.prometheus.returnedPoints.Add(float64(i))
}
reg.MustRegister(c.prometheus.requests)
reg.MustRegister(c.prometheus.cacheRequests)
reg.MustRegister(c.prometheus.cancelledRequests)
reg.MustRegister(c.prometheus.timeoutRequests)
reg.MustRegister(c.prometheus.durations)
reg.MustRegister(c.prometheus.diskRequests)
reg.MustRegister(c.prometheus.diskWaitDurations)
reg.MustRegister(c.prometheus.returnedMetrics)
reg.MustRegister(c.prometheus.returnedPoints)
}
type metricDetailsFlat struct {
*protov3.MetricDetails
Name string
}
type jsonMetricDetailsResponse struct {
Metrics []metricDetailsFlat
FreeSpace uint64
TotalSpace uint64
}
type fileIndex struct {
typ int //nolint:unused,structcheck
idx trigram.Index
files []string
trieIdx *trieIndex
details map[string]*protov3.MetricDetails
accessTimes map[string]int64
freeSpace uint64
totalSpace uint64
}
func
|
(cacheGetFunc func(key string) []points.Point) *CarbonserverListener {
return &CarbonserverListener{
// Config variables
metrics: &metricStruct{},
metricsAsCounters: false,
cacheGet: cacheGetFunc,
logger: zapwriter.Logger("carbonserver"),
accessLogger: zapwriter.Logger("access"),
findCache: queryCache{ec: expirecache.New(0)},
trigramIndex: true,
percentiles: []int{100, 99, 98, 95, 75, 50},
prometheus: prometheus{
request: func(string, int) {},
duration: func(time.Duration) {},
cacheRequest: func(string, bool) {},
cacheDuration: func(string, time.Duration) {},
diskRequest: func() {},
cancelledRequest: func() {},
timeoutRequest: func() {},
diskWaitDuration: func(time.Duration) {},
returnedMetric: func() {},
returnedPoint: func(int) {},
},
quotaAndUsageMetrics: make(chan []points.Points, 1),
apiPerPathRatelimiter: map[string]*ApiPerPathRatelimiter{},
}
}
func (listener *CarbonserverListener) SetWhisperData(whisperData string) {
listener.whisperData = strings.TrimRight(whisperData, "/")
}
func (listener *CarbonserverListener) SetMaxGlobs(maxGlobs int) {
listener.maxGlobs = maxGlobs
}
func (listener *CarbonserverListener) SetEmptyResultOk(emptyResultOk bool) {
listener.emptyResultOk = emptyResultOk
}
func (listener *CarbonserverListener) SetFailOnMaxGlobs(failOnMaxGlobs bool) {
listener.failOnMaxGlobs = failOnMaxGlobs
}
func (listener *CarbonserverListener) SetMaxMetricsGlobbed(max int) {
listener.maxMetricsGlobbed = max
}
func (listener *CarbonserverListener) SetMaxMetricsRendered(max int) {
listener.maxMetricsRendered = max
}
func (listener *CarbonserverListener) SetFLock(flock bool) {
listener.flock = flock
}
func (listener *CarbonserverListener) SetBuckets(buckets int) {
listener.buckets = buckets
}
func (listener *CarbonserverListener) SetScanFrequency(scanFrequency time.Duration) {
listener.scanFrequency = scanFrequency
}
func (listener *CarbonserverListener) SetQuotaUsageReportFrequency(quotaUsageReportFrequency time.Duration) {
listener.quotaUsageReportFrequency = quotaUsageReportFrequency
}
func (listener *CarbonserverListener) SetReadTimeout(readTimeout time.Duration) {
listener.readTimeout = readTimeout
}
func (listener *CarbonserverListener) SetIdleTimeout(idleTimeout time.Duration) {
listener.idleTimeout = idleTimeout
}
func (listener *CarbonserverListener) SetWriteTimeout(writeTimeout time.Duration) {
listener.writeTimeout = writeTimeout
}
func (listener *CarbonserverListener) SetRequestTimeout(requestTimeout time.Duration) {
listener.requestTimeout = requestTimeout
}
func (listener *CarbonserverListener) SetCompressed(compressed bool) {
listener.compressed = compressed
}
func (listener *CarbonserverListener) SetRemoveEmptyFile(remove bool) {
listener.removeEmptyFile = remove
}
func (listener *CarbonserverListener) SetMetricsAsCounters(metricsAsCounters bool) {
listener.metricsAsCounters = metricsAsCounters
}
func (listener *CarbonserverListener) SetQueryCacheEnabled(enabled bool) {
listener.queryCacheEnabled = enabled
}
func (listener *CarbonserverListener) SetQueryCacheSizeMB(size int) {
listener.queryCacheSizeMB = size
}
func (listener *CarbonserverListener) SetFindCacheEnabled(enabled bool) {
listener.findCacheEnabled = enabled
}
func (listener *CarbonserverListener) SetTrigramIndex(enabled bool) {
listener.trigramIndex = enabled
}
func (listener *CarbonserverListener) SetTrieIndex(enabled bool) {
listener.trieIndex = enabled
}
func (listener *CarbonserverListener) SetCacheGetMetricsFunc(recentMetricsFunc func() []map[string]struct{}) {
listener.cacheGetRecentMetrics = recentMetricsFunc
}
func (listener *CarbonserverListener) SetConfigRetriever(retriever configRetriever) {
listener.whisperGetConfig = retriever
}
func (listener *CarbonserverListener) SetConcurrentIndex(enabled bool) {
listener.concurrentIndex = enabled
}
func (listener *CarbonserverListener) SetRealtimeIndex(num int) chan string {
listener.realtimeIndex = num
listener.newMetricsChan = make(chan string, num)
return listener.newMetricsChan
}
func (listener *CarbonserverListener) SetFileListCache(path string) {
listener.fileListCache = path
}
func (listener *CarbonserverListener) SetInternalStatsDir(dbPath string) {
listener.internalStatsDir = dbPath
}
func (listener *CarbonserverListener) SetPercentiles(percentiles []int) {
listener.percentiles = percentiles
}
func (listener *CarbonserverListener) SetEstimateSize(f func(metric string) (size, dataPoints int64)) {
listener.estimateSize = f
}
func (listener *CarbonserverListener) SetQuotas(quotas []*Quota) {
listener.quotas = quotas
}
func (listener *CarbonserverListener) isQuotaEnabled() bool {
return listener.quotas != nil
}
func (listener *CarbonserverListener) ShouldThrottleMetric(ps *points.Points, inCache bool) bool {
fidx := listener.CurrentFileIndex()
if fidx == nil || fidx.trieIdx == nil {
return false
}
var throttled = fidx.trieIdx.throttle(ps, inCache)
return throttled
}
func (listener *CarbonserverListener) SetMaxInflightRequests(max uint64) {
listener.MaxInflightRequests = max
}
func (listener *CarbonserverListener) SetNoServiceWhenIndexIsNotReady(no bool) {
listener.NoServiceWhenIndexIsNotReady = no
}
func (listener *CarbonserverListener) SetHeavyGlobQueryRateLimiters(rls []*GlobQueryRateLimiter) {
listener.globQueryRateLimiters = rls
}
func (listener *CarbonserverListener) SetAPIPerPathRateLimiter(rls map[string]*ApiPerPathRatelimiter) {
listener.apiPerPathRatelimiter = rls
}
// skipcq: RVV-B0011
func (listener *CarbonserverListener) CurrentFileIndex() *fileIndex {
p := listener.fileIdx.Load()
if p == nil {
return nil
}
return p.(*fileIndex)
}
func (listener *CarbonserverListener) UpdateFileIndex(fidx *fileIndex) { listener.fileIdx.Store(fidx) }
// skipcq: RVV-A0005
func (listener *CarbonserverListener) UpdateMetricsAccessTimes(metrics map[string]int64, initial bool) {
idx := listener.CurrentFileIndex()
if idx == nil {
return
}
listener.fileIdxMutex.Lock()
defer listener.fileIdxMutex.Unlock()
batch := new(leveldb.Batch)
for m, t := range metrics {
if _, ok := idx.details[m]; ok {
idx.details[m].RdTime = t
} else {
idx.details[m] = &protov3.MetricDetails{RdTime: t}
}
idx.accessTimes[m] = t
if !initial && listener.db != nil {
buf := make([]byte, 10)
binary.PutVarint(buf, t)
batch.Put([]byte(m), buf)
}
}
if !initial && listener.db != nil {
err := listener.db.Write(batch, nil)
if err != nil {
listener.logger.Info("Error updating database",
zap.Error(err),
)
}
}
}
func (listener *CarbonserverListener) UpdateMetricsAccessTimesByRequest(metrics []string) {
now := time.Now().Unix()
accessTimes := make(map[string]int64)
for _, m := range metrics {
accessTimes[m] = now
}
listener.UpdateMetricsAccessTimes(accessTimes, false)
}
func splitAndInsert(cacheMetricNames map[string]struct{}, newCacheMetricNames []map[string]struct{}) map[string]struct{} {
// splits each new metric from cache-scan and inserts
// into the current cacheMetricNames map
// in: new.metric.name1 --> split by "."
// insert "/new" , "/new/metric", "/new/metric/name1.wsp" into the
// metricsName map. This is in line with the inserts
// during filescan walk
for _, shardAddMap := range newCacheMetricNames {
for newMetric := range shardAddMap {
split := strings.Split(newMetric, ".")
fileName := "/"
for i, seg := range split {
fileName = filepath.Join(fileName, seg)
if i == len(split)-1 {
fileName += ".wsp"
}
if _, ok := cacheMetricNames[fileName]; !ok {
cacheMetricNames[fileName] = struct{}{}
}
}
}
}
return cacheMetricNames
}
func (listener *CarbonserverListener) fileListUpdater(dir string, scanFrequency <-chan time.Time, force <-chan struct{}, exit <-chan struct{}) {
cacheMetricNames := make(map[string]struct{})
var knownMetricsStatTicker, quotaAndUsageStatTicker <-chan time.Time
if listener.isQuotaEnabled() {
ticker := time.NewTicker(listener.quotaUsageReportFrequency)
defer ticker.Stop()
quotaAndUsageStatTicker = ticker.C
} else if listener.trieIndex && listener.concurrentIndex && listener.realtimeIndex > 0 {
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
knownMetricsStatTicker = ticker.C
}
uloop:
for {
select {
case <-exit:
return
case <-scanFrequency:
case <-force:
case <-knownMetricsStatTicker:
// It's only useful when using the realtime index, as the
// scanFrequency should be a long interval/duration
// like 2 hours or more, and with concurrent and
// realtime index, indexed metrics would grow even without disk scanning.
listener.statKnownMetrics(knownMetricsStatTicker)
continue uloop
case <-quotaAndUsageStatTicker:
listener.refreshQuotaAndUsage(quotaAndUsageStatTicker)
continue uloop
case m := <-listener.newMetricsChan:
// listener.newMetricsChan might have high traffic, but
// in theory, there should be no starvation on other channels:
// https://groups.google.com/g/golang-nuts/c/4BR2Sdb6Zzk (2015)
fidx := listener.CurrentFileIndex()
if listener.trieIndex && listener.concurrentIndex && fidx != nil && fidx.trieIdx != nil {
metric := "/" + filepath.Clean(strings.ReplaceAll(m, ".", "/")+".wsp")
if err := fidx.trieIdx.insert(metric, 0, 0, 0); err != nil {
listener.logTrieInsertError(listener.logger, "failed to insert new metrics for realtime indexing", metric, err)
}
}
continue uloop
}
if listener.cacheGetRecentMetrics != nil {
// cacheMetricNames maintains all new metric names added in cache
// when cache-scan is enabled in conf
newCacheMetricNames := listener.cacheGetRecentMetrics()
cacheMetricNames = splitAndInsert(cacheMetricNames, newCacheMetricNames)
}
if listener.updateFileList(dir, cacheMetricNames, quotaAndUsageStatTicker) {
listener.logger.Info("file list updated with cache, starting a new scan immediately")
listener.updateFileList(dir, cacheMetricNames, quotaAndUsageStatTicker)
}
}
}
func (listener *CarbonserverListener) statKnownMetrics(knownMetricsStatTicker <-chan time.Time) {
defer func() {
// drain remaining blocked tickers
for {
select {
case <-knownMetricsStatTicker:
default:
return
}
}
}()
fidx := listener.CurrentFileIndex()
if fidx == nil || fidx.trieIdx == nil {
return
}
start := time.Now()
count, files, dirs, _, _, _, _, _ := fidx.trieIdx.countNodes()
atomic.StoreUint64(&listener.metrics.TrieNodes, uint64(count))
atomic.StoreUint64(&listener.metrics.TrieFiles, uint64(files))
atomic.StoreUint64(&listener.metrics.TrieDirs, uint64(dirs))
// set using the indexed files, instead of returning on-disk files.
//
// WHY: with concurrent and realtime index, disk scan should be set at
// an interval like 2 hours or longer. Counting the files in the trie index
// gives us more timely visibility on how many metrics are known now.
atomic.StoreUint64(&listener.metrics.MetricsKnown, uint64(files))
atomic.StoreUint64(&listener.metrics.TrieCountNodesTimeNs, uint64(time.Since(start)))
listener.logger.Debug(
"trieIndex.countNodes",
zap.Duration("trie_count_nodes_time", time.Since(start)),
)
}
func (listener *CarbonserverListener) refreshQuotaAndUsage(quotaAndUsageStatTicker <-chan time.Time) {
defer func() {
// drain remaining blocked tickers
for {
select {
case <-quotaAndUsageStatTicker:
default:
return
}
}
}()
fidx := listener.CurrentFileIndex()
if !listener.isQuotaEnabled() || !listener.concurrentIndex || listener.realtimeIndex <= 0 || fidx == nil || fidx.trieIdx == nil {
return
}
quotaStart := time.Now()
throughputs, err := fidx.trieIdx.applyQuotas(listener.quotaUsageReportFrequency, listener.quotas...)
if err != nil {
listener.logger.Error(
"refreshQuotaAndUsage",
zap.Error(err),
)
}
quotaTime := uint64(time.Since(quotaStart))
atomic.StoreUint64(&listener.metrics.QuotaApplyTimeNs, quotaTime)
usageStart := time.Now()
files := fidx.trieIdx.refreshUsage(throughputs)
usageTime := uint64(time.Since(usageStart))
atomic.StoreUint64(&listener.metrics.UsageRefreshTimeNs, usageTime)
// set using the indexed files, instead of returning on-disk files.
//
// WHY: quota subsystem atm can only be enabled along with concurrent
// and realtime index, and with concurrent and realtime index, disk
// scan should be set at an interval like 2 hours or longer. Counting
// the files in the trie index gives us more timely visibility into how
// many metrics are known now.
atomic.StoreUint64(&listener.metrics.MetricsKnown, files)
// WHY select: avoid potential block
select {
case listener.quotaAndUsageMetrics <- fidx.trieIdx.qauMetrics:
default:
}
fidx.trieIdx.qauMetrics = nil
listener.logger.Debug(
"refreshQuotaAndUsage",
zap.Uint64("quota_apply_time", quotaTime),
zap.Uint64("usage_refresh_time", usageTime),
)
}
func (listener *CarbonserverListener) updateFileList(dir string, cacheMetricNames map[string]struct{}, quotaAndUsageStatTicker <-chan time.Time) (readFromCache bool) {
logger := listener.logger.With(zap.String("handler", "fileListUpdated"))
defer func() {
if r := recover(); r != nil {
logger.Error("panic encountered",
zap.Stack("stack"),
zap.Any("error", r),
)
}
}()
var t0 = time.Now()
var fidx = listener.CurrentFileIndex()
var files []string
var filesLen int
var details = make(map[string]*protov3.MetricDetails)
var trieIdx *trieIndex
var metricsKnown uint64
var infos []zap.Field
if listener.trieIndex {
if fidx == nil || !listener.concurrentIndex {
trieIdx = newTrie(".wsp", listener.estimateSize)
} else {
trieIdx = fidx.trieIdx
trieIdx.root.gen++
}
}
// populate index for all the metric names in cache
// the iteration takes place only when cache-scan is enabled in conf
var tcache = time.Now()
var cacheMetricLen = len(cacheMetricNames)
for fileName := range cacheMetricNames {
if listener.trieIndex {
if err := trieIdx.insert(fileName, 0, 0, 0); err != nil {
listener.logTrieInsertError(logger, "error populating index from cache indexMap", fileName, err)
}
} else {
files = append(files, fileName)
}
if strings.HasSuffix(fileName, ".wsp") {
metricsKnown++
}
}
cacheIndexRuntime := time.Since(tcache)
// readFromCache should only occur once at the start of the program
if fidx == nil && listener.fileListCache != "" {
fileListCache, err := newFileListCache(listener.fileListCache, 'r')
if err != nil {
if !os.IsNotExist(err) {
logger.Error("failed to read file list cache", zap.Error(err))
}
} else {
readFromCache = true
for fileListCache.scanner.Scan() {
entry := fileListCache.scanner.Text()
if entry == "" {
continue
}
if err := trieIdx.insert(entry, 0, 0, 0); err != nil {
listener.logTrieInsertError(logger, "failed to read from file list cache", entry, err)
readFromCache = false
trieIdx = newTrie(".wsp", listener.estimateSize)
break
}
filesLen++
if strings.HasSuffix(entry, ".wsp") {
metricsKnown++
}
}
if err := fileListCache.close(); err != nil {
logger.Error("failed to close file list cache", zap.Error(err))
}
}
}
if !readFromCache {
var flc *fileListCache
if listener.fileListCache != "" {
var err error
flc, err = newFileListCache(listener.fileListCache, 'w')
if err != nil {
if !os.IsNotExist(err) {
logger.Error("failed to create file list cache", zap.Error(err))
}
} else {
defer func() {
// flc could be reset to nil during filepath walk
if flc != nil {
if err := flc.close(); err != nil {
logger.Error("failed to close flie list cache", zap.Error(err))
}
}
}()
}
}
if fi, err := os.Lstat(dir); err != nil {
logger.Error("failed to stat whisper data directory", zap.String("path", dir), zap.Error(err))
} else if fi.Mode()&os.ModeSymlink == 1 {
logger.Error("can't index symlink data dir", zap.String("path", dir))
}
err := filepath.Walk(dir, func(p string, info os.FileInfo, err error) error {
if err != nil {
logger.Info("error processing", zap.String("path", p), zap.Error(err))
return nil
}
// WHY: filepath.Walk could potentially take a long
// time to complete (>= 5 minutes or more), depending
// on how many files there are on disk. It's nice to
// have consistent quota and usage metrics produced as
// regularly as possible according to the
// quotaUsageReportFrequency specified in the config.
if listener.isQuotaEnabled() {
select {
case <-quotaAndUsageStatTicker:
listener.refreshQuotaAndUsage(quotaAndUsageStatTicker)
default:
}
}
// WHY: filepath.Walk could potentially take a long
// time to complete (>= 5 minutes or more), depending
// on how many files there are on disk. It's nice to
// try to flush newMetricsChan if possible.
//
// TODO: only trigger enter the loop when it's half full?
// len(listener.newMetricsChan) >= cap(listener.newMetricsChan)/2
if listener.trieIndex && listener.concurrentIndex && listener.newMetricsChan != nil {
newMetricsLoop:
for {
select {
case m := <-listener.newMetricsChan:
fileName := "/" + filepath.Clean(strings.ReplaceAll(m, ".", "/")+".wsp")
if err := trieIdx.insert(fileName, 0, 0, 0); err != nil {
listener.logTrieInsertError(logger, "failed to update realtime trie index", m, err)
}
default:
break newMetricsLoop
}
}
}
isFullMetric := strings.HasSuffix(info.Name(), ".wsp")
if info.IsDir() || isFullMetric {
trimmedName := strings.TrimPrefix(p, listener.whisperData)
filesLen++
if flc != nil {
// TODO: include metadata like physical/logical size, data points
if err := flc.write(trimmedName); err != nil {
logger.Error("failed to write to file list cache", zap.Error(err))
if err := flc.close(); err != nil {
logger.Error("failed to close flie list cache", zap.Error(err))
}
flc = nil
}
}
// use cacheMetricNames to check and prevent appending duplicate metrics
// into the index when cacheMetricNamesIndex is enabled
if _, present := cacheMetricNames[trimmedName]; present {
delete(cacheMetricNames, trimmedName)
} else {
if listener.trieIndex {
// WHY:
// * this would only affects empty directories
// * indexing empty directories causes a strange bug in the trie index
// * empty dir isn't useful (at least most of the time)?
if isFullMetric {
var dataPoints int64
if listener.estimateSize != nil {
m := strings.ReplaceAll(trimmedName, "/", ".")
m = m[1 : len(m)-4]
_, dataPoints = listener.estimateSize(m)
}
var physicalSize = info.Size()
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
physicalSize = stat.Blocks * 512
}
if err := trieIdx.insert(trimmedName, info.Size(), physicalSize, dataPoints); err != nil {
// It's better to just log an error than stop indexing
listener.logTrieInsertError(logger, "updateFileList.trie: failed to index path", trimmedName, err)
}
}
} else {
files = append(files, trimmedName)
}
if isFullMetric {
metricsKnown++
}
}
if isFullMetric && listener.internalStatsDir != "" {
i := stat.GetStat(info)
trimmedName = strings.ReplaceAll(trimmedName[1:len(trimmedName)-4], "/", ".")
details[trimmedName] = &protov3.MetricDetails{
Size_: i.Size,
ModTime: i.MTime,
ATime: i.ATime,
RealSize: i.RealSize,
}
}
}
return nil
})
if err != nil {
logger.Error("error getting file list",
zap.Error(err),
)
}
}
if listener.concurrentIndex && trieIdx != nil {
trieIdx.prune()
}
var stat syscall.Statfs_t
if err := syscall.Statfs(dir, &stat); err != nil {
logger.Info("error getting FS Stats",
zap.String("dir", dir),
zap.Error(err),
)
return
}
var freeSpace uint64
// diskspace can be negative and Bavail is therefore int64
if stat.Bavail >= 0 { // nolint:staticcheck // skipcq: SCC-SA4003
freeSpace = uint64(stat.Bavail) * uint64(stat.Bsize)
}
totalSpace := stat.Blocks * uint64(stat.Bsize)
fileScanRuntime := time.Since(t0)
atomic.StoreUint64(&listener.metrics.MetricsKnown, metricsKnown)
atomic.AddUint64(&listener.metrics.FileScanTimeNS, uint64(fileScanRuntime.Nanoseconds()))
nfidx := &fileIndex{
details: details,
freeSpace: freeSpace,
totalSpace: totalSpace,
accessTimes: make(map[string]int64),
}
var pruned int
var indexType = "trigram"
var tindex = time.Now()
var indexSize int
if listener.trieIndex {
indexType = "trie"
nfidx.trieIdx = trieIdx
infos = append(
infos,
zap.Int("trie_depth", int(nfidx.trieIdx.depth)),
zap.String("longest_metric", nfidx.trieIdx.longestMetric),
)
if listener.trigramIndex && !listener.concurrentIndex {
start := time.Now()
nfidx.trieIdx.setTrigrams()
infos = append(infos, zap.Duration("set_trigram_time", time.Since(start)))
}
start := time.Now()
count, files, dirs, _, _, _, _, _ := trieIdx.countNodes()
atomic.StoreUint64(&listener.metrics.TrieNodes, uint64(count))
atomic.StoreUint64(&listener.metrics.TrieFiles, uint64(files))
atomic.StoreUint64(&listener.metrics.TrieDirs, uint64(dirs))
infos = append(infos, zap.Duration("trie_count_nodes_time", time.Since(start)))
indexSize = count
} else {
nfidx.files = files
nfidx.idx = trigram.NewIndex(files)
pruned = nfidx.idx.Prune(0.95)
indexSize = len(nfidx.idx)
}
indexingRuntime := time.Since(tindex) // note: no longer meaningful for trie index
atomic.AddUint64(&listener.metrics.IndexBuildTimeNS, uint64(indexingRuntime.Nanoseconds()))
var tl = time.Now()
if fidx != nil && listener.internalStatsDir != "" {
listener.fileIdxMutex.Lock()
for m := range fidx.accessTimes {
if d, ok := details[m]; ok {
d.RdTime = fidx.accessTimes[m]
} else {
delete(fidx.accessTimes, m)
if listener.db != nil {
listener.db.Delete([]byte(m), nil)
}
}
}
nfidx.accessTimes = fidx.accessTimes
listener.fileIdxMutex.Unlock()
}
var rdTimeUpdateRuntime = time.Since(tl)
listener.UpdateFileIndex(nfidx)
infos = append(infos,
zap.Duration("file_scan_runtime", fileScanRuntime),
zap.Duration("indexing_runtime", indexingRuntime),
zap.Duration("rdtime_update_runtime", rdTimeUpdateRuntime),
zap.Duration("cache_index_runtime", cacheIndexRuntime),
zap.Duration("total_runtime", time.Since(t0)),
zap.Int("Files", filesLen),
zap.Int("index_size", indexSize),
zap.Int("pruned_trigrams", pruned),
zap.Int("cache_metric_len_before", cacheMetricLen),
zap.Int("cache_metric_len_after", len(cacheMetricNames)),
zap.Uint64("metrics_known", metricsKnown),
zap.String("index_type", indexType),
zap.Bool("read_from_cache", readFromCache),
)
logger.Info("file list updated", infos...)
return
}
func (*CarbonserverListener) logTrieInsertError(logger *zap.Logger, msg, metric string, err error) {
zfields := []zap.Field{zap.Error(err), zap.String("metric", metric)}
if ierr, ok := err.(*trieInsertError); ok {
zfields = append(zfields, zap.String("err_info", ierr.info))
}
logger.Error(msg, zfields...)
}
func (listener *CarbonserverListener) expandGlobs(ctx context.Context, query string, resultCh chan<- *ExpandedGlobResponse) {
defer func() {
if err := recover(); err != nil {
resultCh <- &ExpandedGlobResponse{query, nil, nil, fmt.Errorf("%s\n%s", err, debug.Stack())}
}
}()
// Rate limit heavy globbing queries like: *.*.*.*keyword*.
//
// Why: it's expensive to scan the whole index while looking for
// keywords, especially for trie and file system glob.
for _, rl := range listener.globQueryRateLimiters {
if !rl.pattern.MatchString(query) {
continue
}
if cap(rl.maxInflightRequests) == 0 {
err := fmt.Errorf("rejected by query rate limiter: %s", rl.pattern.String())
resultCh <- &ExpandedGlobResponse{query, nil, nil, err}
return
}
rl.maxInflightRequests <- struct{}{}
defer func() {
<-rl.maxInflightRequests
}()
// why: no need to continue execution if the request is already timeout.
select {
case <-ctx.Done():
switch ctx.Err() {
case context.DeadlineExceeded:
listener.prometheus.timeoutRequest()
case context.Canceled:
listener.prometheus.cancelledRequest()
}
err := fmt.Errorf("time out due to heavy glob query rate limiter: %s", rl.pattern.String())
resultCh <- &ExpandedGlobResponse{query, nil, nil, err}
return
default:
}
break
}
logger := TraceContextToZap(ctx, listener.logger)
matchedCount := 0
defer func(start time.Time) {
dur := time.Now().Sub(start) //nolint:gosimple
if dur <= time.Second {
return
}
var itype string
if listener.trieIndex {
itype = "trie"
if listener.trigramIndex {
itype = "trie-trigram"
}
} else if listener.trigramIndex {
itype = "trigram"
}
logger.Info("slow_expand_globs", zap.Duration("time", dur), zap.String("query", query), zap.Int("matched_count", matchedCount), zap.String("index_type", itype))
}(time.Now())
if listener.trieIndex && listener.CurrentFileIndex() != nil {
files, leafs, err := listener.expandGlobsTrie(query)
resultCh <- &ExpandedGlobResponse{query, files, leafs, err}
return
}
var useGlob bool
// TODO: Find out why we have set 'useGlob' if 'star == -1'
if star := strings.IndexByte(query, '*'); listener.cacheGetRecentMetrics == nil &&
strings.IndexByte(query, '[') == -1 &&
strings.IndexByte(query, '?') == -1 &&
(star == -1 || star == len(query)-1) {
useGlob = true
}
logger = logger.With(zap.Bool("use_glob", useGlob))
/* things to glob:
* - carbon.relays -> carbon.relays
* - carbon.re -> carbon.relays, carbon.rewhatever
* - carbon.[rz] -> carbon.relays, carbon.zipper
* - carbon.{re,zi} -> carbon.relays, carbon.zipper
* - match is either dir or .wsp file
* unfortunately, filepath.Glob doesn't handle the curly brace
* expansion for us */
query = strings.ReplaceAll(query, ".", "/")
var globs []string
if !strings.HasSuffix(query, "*") {
globs = append(globs, query+".wsp")
logger.Debug("appending file to globs struct",
zap.Strings("globs", globs),
)
}
globs = append(globs, query)
globs, err := listener.expandGlobBraces(globs)
if err != nil {
resultCh <- &ExpandedGlobResponse{query, nil, nil, err}
return
}
fidx := listener.CurrentFileIndex()
var files []string
fallbackToFS := false
if !listener.trigramIndex || fidx == nil || len(fidx.files) == 0 {
fallbackToFS = true
}
if fidx != nil && !useGlob {
// use the index
docs := make(map[trigram.DocID]struct{})
for _, g := range globs {
gpath := "/" + g
ts := extractTrigrams(g)
// TODO(dgryski): If we have 'not enough trigrams' we
// should bail and use the file-system glob instead
ids := fidx.idx.QueryTrigrams(ts)
for _, id := range ids {
docid := trigram.DocID(id)
if _, ok := docs[docid]; !ok {
matched, err := filepath.Match(gpath, fidx.files[id])
if err == nil && matched {
docs[docid] = struct{}{}
}
}
}
}
for id := range docs {
files = append(files, listener.whisperData+fidx.files[id])
}
sort.Strings(files)
}
// Not an 'else' clause because the trigram-searching code might want
// to fall back to the file-system glob
if useGlob || fallbackToFS {
// no index or we were asked to hit the filesystem
for _, g := range globs {
nfiles, err := filepath.Glob(listener.whisperData + "/" + g)
if err == nil {
files = append(files, nfiles...)
}
}
}
leafs := make([]bool, len(files))
for i, p := range files {
s, err := os.Stat(p)
switch {
case err == nil:
// exists on disk
p = p[len(listener.whisperData+"/"):]
if !s.IsDir() && strings.HasSuffix(p, ".wsp") {
p = p[:len(p)-4]
leafs[i] = true
} else {
leafs[i] = false
}
files[i] = strings.ReplaceAll(p, "/", ".")
case os.IsNotExist(err):
// cache-only, so no fileinfo
// mark "leafs" based on wsp suffix
p = p[len(listener.whisperData+"/"):]
if strings.HasSuffix(p, ".wsp") {
p = p[:len(p)-4]
leafs[i] = true
} else {
leafs[i] = false
}
files[i] = strings.ReplaceAll(p, "/", ".")
default:
continue
}
}
matchedCount = len(files)
resultCh <- &ExpandedGlobResponse{query, files, leafs, nil}
}
// TODO(dgryski): add tests
func (listener *CarbonserverListener) expandGlobBraces(globs []string) ([]string, error) {
for {
bracematch := false
var newglobs []string
for _, glob := range globs {
lbrace := strings.Index(glob, "{")
rbrace := -1
if lbrace > -1 {
rbrace = strings.Index(glob[lbrace:], "}")
if rbrace > -1 {
rbrace += lbrace
}
}
if lbrace > -1 && rbrace > -1 {
bracematch = true
expansion := glob[lbrace+1 : rbrace]
parts := strings.Split(expansion, ",")
for _, sub := range parts {
if len(newglobs) > listener.maxGlobs {
if listener.failOnMaxGlobs {
return nil, errMaxGlobsExhausted
}
break
}
newglobs = append(newglobs, glob[:lbrace]+sub+glob[rbrace+1:])
}
} else {
if len(newglobs) > listener.maxGlobs {
if listener.failOnMaxGlobs {
return nil, errMaxGlobsExhausted
}
break
}
newglobs = append(newglobs, glob)
}
}
globs = newglobs
if !bracematch {
break
}
}
return globs, nil
}
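// Example (illustrative): expandGlobBraces([]string{"carbon/{relays,zipper}/*"})
// expands to []string{"carbon/relays/*", "carbon/zipper/*"}; expansion repeats
// until no braces remain, and it returns errMaxGlobsExhausted only when
// failOnMaxGlobs is set and the expansion exceeds maxGlobs.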
func (listener *CarbonserverListener) Stat(send helper.StatCallback) {
senderRaw := helper.SendUint64
sender := helper.SendAndSubstractUint64
if listener.metricsAsCounters {
sender = helper.SendUint64
}
var m runtime.MemStats
runtime.ReadMemStats(&m)
pauseNS := uint64(m.PauseTotalNs)
alloc := uint64(m.Alloc)
totalAlloc := uint64(m.TotalAlloc)
numGC := uint64(m.NumGC)
sender("render_requests", &listener.metrics.RenderRequests, send)
sender("render_errors", &listener.metrics.RenderErrors, send)
sender("notfound", &listener.metrics.NotFound, send)
sender("find_requests", &listener.metrics.FindRequests, send)
sender("find_errors", &listener.metrics.FindErrors, send)
sender("find_zero", &listener.metrics.FindZero, send)
sender("list_requests", &listener.metrics.ListRequests, send)
sender("list_errors", &listener.metrics.ListErrors, send)
sender("details_requests", &listener.metrics.DetailsRequests, send)
sender("details_errors", &listener.metrics.DetailsErrors, send)
sender("cache_hit", &listener.metrics.CacheHit, send)
sender("cache_miss", &listener.metrics.CacheMiss, send)
sender("cache_work_time_ns", &listener.metrics.CacheWorkTimeNS, send)
sender("cache_wait_time_fetch_ns", &listener.metrics.CacheWaitTimeFetchNS, send)
sender("cache_requests", &listener.metrics.CacheRequestsTotal, send)
sender("disk_wait_time_ns", &listener.metrics.DiskWaitTimeNS, send)
sender("disk_requests", &listener.metrics.DiskRequests, send)
sender("points_returned", &listener.metrics.PointsReturned, send)
sender("metrics_returned", &listener.metrics.MetricsReturned, send)
sender("metrics_found", &listener.metrics.MetricsFound, send)
sender("fetch_size_bytes", &listener.metrics.FetchSize, send)
senderRaw("metrics_known", &listener.metrics.MetricsKnown, send)
sender("index_build_time_ns", &listener.metrics.IndexBuildTimeNS, send)
sender("file_scan_time_ns", &listener.metrics.FileScanTimeNS, send)
sender("query_cache_hit", &listener.metrics.QueryCacheHit, send)
sender("query_cache_miss", &listener.metrics.QueryCacheMiss, send)
sender("find_cache_hit", &listener.metrics.FindCacheHit, send)
sender("find_cache_miss", &listener.metrics.FindCacheMiss, send)
sender("inflight_requests_count", &listener.metrics.InflightRequests, send)
sender("inflight_requests_limit", &listener.MaxInflightRequests, send)
sender("rejected_too_many_requests", &listener.metrics.RejectedTooManyRequests, send)
if listener.concurrentIndex {
senderRaw("trie_index_nodes", &listener.metrics.TrieNodes, send)
senderRaw("trie_index_files", &listener.metrics.TrieFiles, send)
senderRaw("trie_index_dirs", &listener.metrics.TrieDirs, send)
senderRaw("trie_count_nodes_time_ns", &listener.metrics.TrieCountNodesTimeNs, send)
}
if listener.isQuotaEnabled() {
senderRaw("quota_apply_time_ns", &listener.metrics.QuotaApplyTimeNs, send)
senderRaw("usage_refresh_time_ns", &listener.metrics.UsageRefreshTimeNs, send)
}
sender("alloc", &alloc, send)
sender("total_alloc", &totalAlloc, send)
sender("num_gc", &numGC, send)
sender("pause_ns", &pauseNS, send)
for name, codes := range statusCodes {
for i := range codes {
sender(fmt.Sprintf("request_codes.%s.%vxx", name, i+1), &codes[i], send)
}
}
for i := 0; i <= listener.buckets; i++ {
sender(fmt.Sprintf("requests_in_%dms_to_%dms", i*100, (i+1)*100), &listener.timeBuckets[i], send)
}
// Computing response percentiles
if len(listener.percentiles) > 0 {
listener.requestsTimes.Lock()
list := listener.requestsTimes.list
listener.requestsTimes.list = make([]int64, 0, len(list))
listener.requestsTimes.Unlock()
if len(list) == 0 {
for _, p := range listener.percentiles {
send(fmt.Sprintf("request_time_%vth_percentile_ns", p), 0)
}
} else {
sort.Slice(list, func(i, j int) bool { return list[i] < list[j] })
for _, p := range listener.percentiles {
key := int(float64(p)/100*float64(len(list))) - 1
if key < 0 {
key = 0
}
send(fmt.Sprintf("request_time_%vth_percentile_ns", p), float64(list[key]))
}
}
}
// WHY select: avoid potential block
select {
case qauMetrics := <-listener.quotaAndUsageMetrics:
for _, ps := range qauMetrics {
send(ps.Metric, float64(ps.Data[0].Value))
}
default:
}
}
func (listener *CarbonserverListener) Stop() error {
close(listener.forceScanChan)
close(listener.exitChan)
if listener.db != nil {
listener.db.Close()
}
listener.tcpListener.Close()
return nil
}
func removeDirectory(dir string) error {
// A small safety check, it doesn't cover all the cases, but will help a little bit in case of misconfiguration
switch strings.TrimSuffix(dir, "/") {
case "/", "/etc", "/usr", "/bin", "/sbin", "/lib", "/lib64", "/usr/lib", "/usr/lib64", "/usr/bin", "/usr/sbin", "C:", "C:\\":
return fmt.Errorf("Can't remove system directory: %s", dir)
}
d, err := os.Open(dir)
if err != nil {
return err
}
defer d.Close()
files, err := d.Readdirnames(-1)
if err != nil {
return err
}
for _, f := range files {
err = os.RemoveAll(filepath.Join(dir, f))
if err != nil {
return err
}
}
return nil
}
func (listener *CarbonserverListener) initStatsDB() error {
var err error
if listener.internalStatsDir != "" {
o := &opt.Options{
Filter: filter.NewBloomFilter(10),
}
listener.db, err = leveldb.OpenFile(listener.internalStatsDir, o)
if err != nil {
listener.logger.Error("Can't open statistics database",
zap.Error(err),
)
err = removeDirectory(listener.internalStatsDir)
if err != nil {
listener.logger.Error("Can't remove old statistics database",
zap.Error(err),
)
return err
}
listener.db, err = leveldb.OpenFile(listener.internalStatsDir, o)
if err != nil {
listener.logger.Error("Can't recreate statistics database",
zap.Error(err),
)
return err
}
}
}
return nil
}
func (listener *CarbonserverListener) rateLimitRequest(h http.HandlerFunc) http.HandlerFunc {
return func(wr http.ResponseWriter, req *http.Request) {
// Can't use http.TimeoutHandler here due to supporting per-path timeout
if ratelimiter, ok := listener.apiPerPathRatelimiter[req.URL.Path]; listener.requestTimeout > 0 || (ok && ratelimiter.timeout > 0) {
timeout := listener.requestTimeout
if ok && ratelimiter.timeout > 0 {
timeout = ratelimiter.timeout
}
ctx, cancel := context.WithTimeout(req.Context(), timeout)
defer cancel()
req = req.WithContext(ctx)
}
t0 := time.Now()
ctx := req.Context()
accessLogger := TraceContextToZap(ctx, listener.accessLogger.With(
zap.String("handler", "rate_limit"),
zap.String("url", req.URL.RequestURI()),
zap.String("peer", req.RemoteAddr),
))
if listener.NoServiceWhenIndexIsNotReady && listener.CurrentFileIndex() == nil {
accessLogger.Error("request denied",
zap.Duration("runtime_seconds", time.Since(t0)),
zap.String("reason", "index not ready"),
zap.Int("http_code", http.StatusServiceUnavailable),
)
http.Error(wr, "Service unavailable (index not ready)", http.StatusServiceUnavailable)
return
}
if ratelimiter, ok := listener.apiPerPathRatelimiter[req.URL.Path]; ok {
if cap(ratelimiter.maxInflightRequests) == 0 {
http.Error(wr, "Bad request (blocked by api per path rate limiter)", http.StatusBadRequest)
return
}
ratelimiter.maxInflightRequests <- struct{}{}
defer func() {
select {
case <-ratelimiter.maxInflightRequests:
default:
}
}()
// why: if the request is already timeout, there is no
// need to resume execution.
select {
case <-ctx.Done():
switch ctx.Err() {
case context.DeadlineExceeded:
listener.prometheus.timeoutRequest()
case context.Canceled:
listener.prometheus.cancelledRequest()
}
accessLogger.Error("request timeout due to per url rate limiting",
zap.Duration("runtime_seconds", time.Since(t0)),
zap.String("reason", "timeout due to per url rate limiting"),
zap.Int("http_code", http.StatusRequestTimeout),
)
http.Error(wr, "Bad request (timeout due to maxInflightRequests)", http.StatusRequestTimeout)
return
default:
}
}
// TODO: to deprecate as it's replaced by per-path rate limiting?
//
// rate limit inflight requests
inflights := atomic.AddUint64(&listener.metrics.InflightRequests, 1)
defer atomic.AddUint64(&listener.metrics.InflightRequests, ^uint64(0))
if listener.MaxInflightRequests > 0 && inflights > listener.MaxInflightRequests {
atomic.AddUint64(&listener.metrics.RejectedTooManyRequests, 1)
accessLogger.Error("request denied",
zap.Duration("runtime_seconds", time.Since(t0)),
zap.String("reason", "too many requests"),
zap.Int("http_code", http.StatusTooManyRequests),
)
http.Error(wr, "Bad request (too many requests)", http.StatusTooManyRequests)
return
}
h(wr, req)
}
}
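// Wiring sketch (illustrative): handlers are wrapped by rateLimitRequest via
// wrapHandler in Listen below; per-path limits and timeouts can be installed
// with, e.g.,
//
//	listener.SetAPIPerPathRateLimiter(map[string]*ApiPerPathRatelimiter{
//		"/render/": NewApiPerPathRatelimiter(8, 30*time.Second),
//	})
//
// which allows at most 8 in-flight /render/ requests, each bounded by a 30s
// timeout.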
func (listener *CarbonserverListener) Listen(listen string) error {
logger := listener.logger
logger.Info("starting carbonserver",
zap.String("listen", listen),
zap.String("whisperData", listener.whisperData),
zap.Int("maxGlobs", listener.maxGlobs),
zap.String("scanFrequency", listener.scanFrequency.String()),
)
listener.exitChan = make(chan struct{})
if (listener.trigramIndex || listener.trieIndex) && listener.scanFrequency != 0 {
listener.forceScanChan = make(chan struct{})
go listener.fileListUpdater(listener.whisperData, time.Tick(listener.scanFrequency), listener.forceScanChan, listener.exitChan) //nolint:staticcheck
listener.forceScanChan <- struct{}{}
}
listener.queryCache = queryCache{ec: expirecache.New(uint64(listener.queryCacheSizeMB))}
// +1 to track everything over the number of buckets we track
listener.timeBuckets = make([]uint64, listener.buckets+1)
carbonserverMux := http.NewServeMux()
wrapHandler := func(h http.HandlerFunc, handlerStatusCodes []uint64) http.HandlerFunc {
return httputil.TrackConnections(
httputil.TimeHandler(
TraceHandler(
listener.rateLimitRequest(h),
statusCodes["combined"],
handlerStatusCodes,
listener.prometheus.request,
),
listener.bucketRequestTimes,
),
)
}
carbonserverMux.HandleFunc("/_internal/capabilities/", wrapHandler(listener.capabilityHandler, statusCodes["capabilities"]))
carbonserverMux.HandleFunc("/metrics/find/", wrapHandler(listener.findHandler, statusCodes["find"]))
carbonserverMux.HandleFunc("/metrics/list/", wrapHandler(listener.listHandler, statusCodes["list"]))
carbonserverMux.HandleFunc("/metrics/list_query/", wrapHandler(listener.listQueryHandler, statusCodes["list"]))
carbonserverMux.HandleFunc("/metrics/details/", wrapHandler(listener.detailsHandler, statusCodes["details"]))
carbonserverMux.HandleFunc("/render/", wrapHandler(listener.renderHandler, statusCodes["render"]))
carbonserverMux.HandleFunc("/info/", wrapHandler(listener.infoHandler, statusCodes["info"]))
carbonserverMux.HandleFunc("/forcescan", func(w http.ResponseWriter, r *http.Request) {
select {
case listener.forceScanChan <- struct{}{}:
w.WriteHeader(http.StatusAccepted)
case <-time.After(time.Second):
w.WriteHeader(http.StatusServiceUnavailable)
}
})
carbonserverMux.HandleFunc("/admin/quota", func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "text/plain")
fidx := listener.CurrentFileIndex()
if fidx == nil && fidx.trieIdx == nil {
fmt.Fprintf(w, "index doesn't exist.")
return
}
fidx.trieIdx.getQuotaTree(w)
})
carbonserverMux.HandleFunc("/admin/info", func(w http.ResponseWriter, r *http.Request) {
// URL: /admin/info?scopes=cache,config
w.Header().Add("Content-Type", "application/json")
// Parameter "scopes" is a csv string. Valid values: cache, config.
// By default, /admin/info returns all admin info
var scopes map[string]bool
if fs := strings.TrimSpace(r.URL.Query().Get("scopes")); fs != "" {
scopes = map[string]bool{}
for _, f := range strings.Split(fs, ",") {
scopes[strings.TrimSpace(f)] = true
}
}
infos := map[string]map[string]interface{}{}
for name, f := range listener.internalInfoCallbacks {
if scopes != nil && !scopes[name] {
continue
}
infos[name] = f()
}
json.NewEncoder(w).Encode(infos)
})
carbonserverMux.HandleFunc("/robots.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "User-agent: *\nDisallow: /")
})
tcpAddr, err := net.ResolveTCPAddr("tcp", listen)
if err != nil {
return err
}
listener.tcpListener, err = net.ListenTCP("tcp", tcpAddr)
if err != nil {
return err
}
if listener.internalStatsDir != "" {
err = listener.initStatsDB()
if err != nil {
logger.Error("Failed to reinitialize statistics database")
} else {
accessTimes := make(map[string]int64)
iter := listener.db.NewIterator(nil, nil)
for iter.Next() {
// Remember that the contents of the returned slice should not be modified, and
// only valid until the next call to Next.
key := iter.Key()
value := iter.Value()
v, r := binary.Varint(value)
if r <= 0 {
logger.Error("Can't parse value",
zap.String("key", string(key)),
)
continue
}
accessTimes[string(key)] = v
}
iter.Release()
err = iter.Error()
if err != nil {
logger.Info("Error reading from statistics database",
zap.Error(err),
)
listener.db.Close()
err = removeDirectory(listener.internalStatsDir)
if err != nil {
logger.Error("Failed to reinitialize statistics database",
zap.Error(err),
)
} else {
err = listener.initStatsDB()
if err != nil {
logger.Error("Failed to reinitialize statistics database",
zap.Error(err),
)
}
}
}
listener.UpdateMetricsAccessTimes(accessTimes, true)
}
}
go listener.queryCache.ec.StoppableApproximateCleaner(10*time.Second, listener.exitChan)
srv := &http.Server{
Handler: gziphandler.GzipHandler(carbonserverMux),
ReadTimeout: listener.readTimeout,
IdleTimeout: listener.idleTimeout,
WriteTimeout: listener.writeTimeout,
}
go srv.Serve(listener.tcpListener)
return nil
}
func (listener *CarbonserverListener) bucketRequestTimes(req *http.Request, t time.Duration) {
listener.prometheus.duration(t)
ms := t.Nanoseconds() / int64(time.Millisecond)
if len(listener.percentiles) > 0 {
listener.requestsTimes.Lock()
listener.requestsTimes.list = append(listener.requestsTimes.list, t.Nanoseconds())
listener.requestsTimes.Unlock()
}
bucket := int(math.Log(float64(ms)) * math.Log10E)
if bucket < 0 {
bucket = 0
}
if bucket < listener.buckets {
atomic.AddUint64(&listener.timeBuckets[bucket], 1)
} else {
// Too big? Increment overflow bucket and log
atomic.AddUint64(&listener.timeBuckets[listener.buckets], 1)
listener.logger.Info("slow request",
zap.String("url", req.URL.RequestURI()),
zap.String("peer", req.RemoteAddr),
)
}
}
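// Example (illustrative): a 250ms request computes
// bucket = int(math.Log(250) * math.Log10E) = int(log10(250)) = 2, so buckets
// grow by powers of ten; anything at or beyond listener.buckets lands in the
// overflow bucket and is logged as a slow request.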
func extractTrigrams(query string) []trigram.T {
if len(query) < 3 {
return nil
}
var start int
var i int
var trigrams []trigram.T
for i < len(query) {
if query[i] == '[' || query[i] == '*' || query[i] == '?' {
trigrams = trigram.Extract(query[start:i], trigrams)
if query[i] == '[' {
for i < len(query) && query[i] != ']' {
i++
}
}
start = i + 1
}
i++
}
if start < i {
trigrams = trigram.Extract(query[start:i], trigrams)
}
return trigrams
}
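// Example (illustrative): extractTrigrams("carbon.rel*") extracts trigrams
// only from the literal run "carbon.rel" (car, arb, rbo, bon, on., n.r, .re,
// rel); '*' and '?' terminate a literal run, and a '[...]' character class is
// skipped entirely.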
type fileListCache struct {
path string
mode byte
file *os.File
scanner *bufio.Scanner
writer *gzip.Writer
}
func newFileListCache(p string, mode byte) (*fileListCache, error) {
var flc fileListCache
var err error
flc.path = p
flc.mode = mode
if mode == 'r' {
flc.file, err = os.Open(p)
if err != nil {
return nil, err
}
r, err := gzip.NewReader(flc.file)
if err != nil {
return nil, err
}
flc.scanner = bufio.NewScanner(r)
}
if mode == 'w' {
flc.file, err = os.Create(p + ".tmp")
if err != nil {
return nil, err
}
flc.writer = gzip.NewWriter(flc.file)
}
return &flc, err
}
func (flc *fileListCache) write(p string) error {
_, err := flc.writer.Write([]byte(p + "\n"))
return err
}
func (flc *fileListCache) close() error {
var errs []string
if flc.mode == 'w' {
if err := flc.writer.Flush(); err != nil {
errs = append(errs, fmt.Sprintf("gzip.flush: %s", err))
}
if err := flc.writer.Close(); err != nil {
errs = append(errs, fmt.Sprintf("gzip.close: %s", err))
}
if err := flc.file.Sync(); err != nil {
errs = append(errs, fmt.Sprintf("file.sync: %s", err))
}
}
if err := flc.file.Close(); err != nil {
errs = append(errs, fmt.Sprintf("file.sync: %s", err))
}
if flc.mode == 'w' && len(errs) == 0 {
if err := os.Rename(flc.path+".tmp", flc.path); err != nil {
errs = append(errs, fmt.Sprintf("file.rename: %s", err))
}
}
if len(errs) > 0 {
return errors.New(strings.Join(errs, ";"))
}
return nil
}
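// Design note (describing the code above): writers stream gzip'd entries into
// "<path>.tmp", and close() renames the tmp file over the real path only
// after a successful flush, close and fsync, so readers never observe a
// partially written file list cache.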
func (listener *CarbonserverListener) RegisterInternalInfoHandler(name string, f func() map[string]interface{}) {
if listener.internalInfoCallbacks == nil {
listener.internalInfoCallbacks = map[string]func() map[string]interface{}{}
}
listener.internalInfoCallbacks[name] = f
}
type GlobQueryRateLimiter struct {
pattern *regexp.Regexp
maxInflightRequests chan struct{}
}
func NewGlobQueryRateLimiter(pattern string, max uint) (*GlobQueryRateLimiter, error) {
exp, err := regexp.Compile(pattern)
if err != nil {
return nil, err
}
return &GlobQueryRateLimiter{pattern: exp, maxInflightRequests: make(chan struct{}, max)}, nil
}
type ApiPerPathRatelimiter struct {
maxInflightRequests chan struct{}
timeout time.Duration
}
func NewApiPerPathRatelimiter(maxInflightRequests uint, timeout time.Duration) *ApiPerPathRatelimiter {
return &ApiPerPathRatelimiter{
maxInflightRequests: make(chan struct{}, maxInflightRequests),
timeout: timeout,
}
}
|
NewCarbonserverListener
|
expr.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # Translation of Expressions
//!
//! The expr module handles translation of expressions. The most general
//! translation routine is `trans()`, which will translate an expression
//! into a datum. `trans_into()` is also available, which will translate
//! an expression and write the result directly into memory, sometimes
//! avoiding the need for a temporary stack slot. Finally,
//! `trans_to_lvalue()` is available if you'd like to ensure that the
//! result has cleanup scheduled.
//!
//! Internally, each of these functions dispatches to various other
//! expression functions depending on the kind of expression. We divide
//! up expressions into:
//!
//! - **Datum expressions:** Those that most naturally yield values.
//! Examples would be `22`, `box x`, or `a + b` (when not overloaded).
//! - **DPS expressions:** Those that most naturally write into a location
//! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
//! - **Statement expressions:** Those that do not generate a meaningful
//! result. Examples would be `while { ... }` or `return 44`.
//!
//! Public entry points:
//!
//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
//! storing the result into `dest`. This is the preferred form, if you
//! can manage it.
//!
//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
//! `Datum` with the result. You can then store the datum, inspect
//! the value, etc. This may introduce temporaries if the datum is a
//! structural type.
//!
//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
//! expression and ensures that the result has a cleanup associated with it,
//! creating a temporary stack slot if necessary.
//!
//! - `trans_local_var -> Datum`: looks up a local variable or upvar.
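//!
//! As a rough illustration of the three categories (the identifiers below
//! are made up for the example):
//!
//! ```ignore
//! let n = 22;                    // datum expression: naturally yields a value
//! let p = Point { x: 3, y: 4 };  // DPS expression: written into `p` in place
//! while more() { step(); }       // statement expression: no meaningful result
//! ```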
#![allow(non_camel_case_types)]
pub use self::Dest::*;
use self::lazy_binop_ty::*;
use back::abi;
use llvm::{self, ValueRef, TypeKind};
use middle::check_const;
use middle::def;
use middle::lang_items::CoerceUnsizedTraitLangItem;
use middle::subst::{Substs, VecPerParamSpace};
use middle::traits;
use trans::{_match, adt, asm, base, callee, closure, consts, controlflow};
use trans::base::*;
use trans::build::*;
use trans::cleanup::{self, CleanupMethods};
use trans::common::*;
use trans::datum::*;
use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
use trans::glue;
use trans::machine;
use trans::meth;
use trans::monomorphize;
use trans::tvec;
use trans::type_of;
use middle::cast::{CastKind, CastTy};
use middle::ty::{AdjustDerefRef, AdjustReifyFnPointer, AdjustUnsafeFnPointer};
use middle::ty::{self, Ty};
use middle::ty::MethodCall;
use util::common::indenter;
use trans::machine::{llsize_of, llsize_of_alloc};
use trans::type_::Type;
use syntax::{ast, ast_util, codemap};
use syntax::parse::token::InternedString;
use syntax::ptr::P;
use syntax::parse::token;
use std::mem;
// Destinations
// These are passed around by the code generating functions to track the
// destination of a computation's value.
#[derive(Copy, Clone, PartialEq)]
pub enum Dest {
SaveIn(ValueRef),
Ignore,
}
impl Dest {
pub fn to_string(&self, ccx: &CrateContext) -> String {
match *self {
SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
Ignore => "Ignore".to_string()
}
}
}
/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
/// better optimized LLVM code.
pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
dest: Dest)
-> Block<'blk, 'tcx> {
let mut bcx = bcx;
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
if bcx.tcx().tables.borrow().adjustments.contains_key(&expr.id) {
// use trans, which may be less efficient but
// which will perform the adjustments:
let datum = unpack_datum!(bcx, trans(bcx, expr));
return datum.store_to_dest(bcx, dest, expr.id);
}
let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
if !qualif.intersects(
check_const::ConstQualif::NOT_CONST |
check_const::ConstQualif::NEEDS_DROP
) {
if !qualif.intersects(check_const::ConstQualif::PREFER_IN_PLACE) {
if let SaveIn(lldest) = dest {
let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
bcx.fcx.param_substs);
// Cast pointer to destination, because constants
// have different types.
let lldest = PointerCast(bcx, lldest, val_ty(global));
memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
return bcx;
}
// Even if we don't have a value to emit, and the expression
// doesn't have any side-effects, we still have to translate the
// body of any closures.
// FIXME: Find a better way of handling this case.
} else {
// The only way we're going to see a `const` at this point is if
// it prefers in-place instantiation, likely because it contains
// `[x; N]` somewhere within.
match expr.node {
ast::ExprPath(..) => {
match bcx.def(expr.id) {
def::DefConst(did) => {
let const_expr = consts::get_const_expr(bcx.ccx(), did, expr);
// Temporarily get cleanup scopes out of the way,
// as they require sub-expressions to be contained
// inside the current AST scope.
// These should record no cleanups anyways, `const`
// can't have destructors.
let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
vec![]);
// Lock emitted debug locations to the location of
// the constant reference expression.
debuginfo::with_source_location_override(bcx.fcx,
expr.debug_loc(),
|| {
bcx = trans_into(bcx, const_expr, dest)
});
let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
scopes);
assert!(scopes.is_empty());
return bcx;
}
_ => {}
}
}
_ => {}
}
}
}
debug!("trans_into() expr={:?}", expr);
let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
expr.id,
expr.span,
false);
bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
let kind = expr_kind(bcx.tcx(), expr);
bcx = match kind {
ExprKind::Lvalue | ExprKind::RvalueDatum => {
trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
}
ExprKind::RvalueDps => {
trans_rvalue_dps_unadjusted(bcx, expr, dest)
}
ExprKind::RvalueStmt => {
trans_rvalue_stmt_unadjusted(bcx, expr)
}
};
bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
}
/// Translates an expression, returning a datum (and new block) encapsulating the result. When
/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
/// stack.
pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
debug!("trans(expr={:?})", expr);
let mut bcx = bcx;
let fcx = bcx.fcx;
let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
let adjusted_global = !qualif.intersects(check_const::ConstQualif::NON_STATIC_BORROWS);
let global = if !qualif.intersects(
check_const::ConstQualif::NOT_CONST |
check_const::ConstQualif::NEEDS_DROP
) {
let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
bcx.fcx.param_substs);
if qualif.intersects(check_const::ConstQualif::HAS_STATIC_BORROWS) {
// Is borrowed as 'static, must return lvalue.
// Cast pointer to global, because constants have different types.
let const_ty = expr_ty_adjusted(bcx, expr);
let llty = type_of::type_of(bcx.ccx(), const_ty);
let global = PointerCast(bcx, global, llty.ptr_to());
let datum = Datum::new(global, const_ty, Lvalue);
return DatumBlock::new(bcx, datum.to_expr_datum());
}
// Otherwise, keep around and perform adjustments, if needed.
let const_ty = if adjusted_global {
expr_ty_adjusted(bcx, expr)
} else {
expr_ty(bcx, expr)
};
// This could use a better heuristic.
Some(if type_is_immediate(bcx.ccx(), const_ty) {
// Cast pointer to global, because constants have different types.
let llty = type_of::type_of(bcx.ccx(), const_ty);
let global = PointerCast(bcx, global, llty.ptr_to());
// Maybe just get the value directly, instead of loading it?
immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
} else {
let llty = type_of::type_of(bcx.ccx(), const_ty);
// HACK(eddyb) get around issues with lifetime intrinsics.
let scratch = alloca_no_lifetime(bcx, llty, "const");
let lldest = if !const_ty.is_structural() {
// Cast pointer to slot, because constants have different types.
PointerCast(bcx, scratch, val_ty(global))
} else {
// In this case, memcpy_ty calls llvm.memcpy after casting both
// source and destination to i8*, so we don't need any casts.
scratch
};
memcpy_ty(bcx, lldest, global, const_ty);
Datum::new(scratch, const_ty, Rvalue::new(ByRef))
})
} else {
None
};
let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
expr.id,
expr.span,
false);
fcx.push_ast_cleanup_scope(cleanup_debug_loc);
let datum = match global {
Some(rvalue) => rvalue.to_expr_datum(),
None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
};
let datum = if adjusted_global {
datum // trans::consts already performed adjustments.
} else {
unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
};
bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
return DatumBlock::new(bcx, datum);
}
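// Fat-pointer helpers. A fat pointer is laid out as two words, (data, extra),
// where `extra` is the length for slices or the vtable pointer for trait
// objects; `get_dataptr` and `get_len` return pointers to those two words.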
pub fn get_len(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_EXTRA])
}
pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_ADDR])
}
pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
Store(bcx, Load(bcx, get_len(bcx, src_ptr)), get_len(bcx, dst_ptr));
}
/// Retrieve the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
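///
/// For example (illustrative): unsizing `&[T; 4]` to `&[T]` produces the
/// constant length `4` as the extra word, while unsizing `&T` to a trait
/// object produces the vtable pointer for `T`'s impl; a trait-to-trait
/// upcast just reuses `old_info`.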
pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
source: Ty<'tcx>,
target: Ty<'tcx>,
old_info: Option<ValueRef>,
param_substs: &'tcx Substs<'tcx>)
-> ValueRef {
let (source, target) = ccx.tcx().struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
(&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len),
(&ty::TyTrait(_), &ty::TyTrait(_)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never actually require an actual
// change to the vtable.
old_info.expect("unsized_info: missing old info for trait upcast")
}
(_, &ty::TyTrait(box ty::TraitTy { ref principal, .. })) => {
// Note that we preserve binding levels here:
let substs = principal.0.substs.with_self_ty(source).erase_regions();
let substs = ccx.tcx().mk_substs(substs);
let trait_ref = ty::Binder(ty::TraitRef { def_id: principal.def_id(),
substs: substs });
consts::ptrcast(meth::get_vtable(ccx, trait_ref, param_substs),
Type::vtable_ptr(ccx))
}
_ => ccx.sess().bug(&format!("unsized_info: invalid unsizing {:?} -> {:?}",
source,
target))
}
}
/// Helper for trans that applies adjustments from `expr` to `datum`, which should be the unadjusted
/// translation of `expr`.
fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>)
-> DatumBlock<'blk, 'tcx, Expr>
{
let mut bcx = bcx;
let mut datum = datum;
let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
None => {
return DatumBlock::new(bcx, datum);
}
Some(adj) => { adj }
};
debug!("unadjusted datum for expr {:?}: {} adjustment={:?}",
expr,
datum.to_string(bcx.ccx()),
adjustment);
match adjustment {
AdjustReifyFnPointer => {
// FIXME(#19925) once fn item types are
// zero-sized, we'll need to do something here
}
AdjustUnsafeFnPointer => {
// purely a type-level thing
}
AdjustDerefRef(ref adj) => {
let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
// We are a bit paranoid about adjustments and thus might have a re-
// borrow here which merely derefs and then refs again (it might have
// a different region or mutability, but we don't care here).
match datum.ty.sty {
// Don't skip a conversion from Box<T> to &T, etc.
ty::TyRef(..) => {
if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
// Don't skip an overloaded deref.
0
} else {
1
}
}
_ => 0
}
} else {
0
};
if adj.autoderefs > skip_reborrows {
// Schedule cleanup.
let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
lval.to_expr_datum(),
adj.autoderefs - skip_reborrows));
}
// (You might think there is a more elegant way to do this than a
// skip_reborrows bool, but then you remember that the borrow checker exists).
if skip_reborrows == 0 && adj.autoref.is_some() {
if !type_is_sized(bcx.tcx(), datum.ty) {
// Arrange cleanup
let lval = unpack_datum!(bcx,
datum.to_lvalue_datum(bcx, "ref_fat_ptr", expr.id));
datum = unpack_datum!(bcx, ref_fat_ptr(bcx, lval));
} else {
datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
}
}
if let Some(target) = adj.unsize {
// We do not arrange cleanup ourselves; if we already are an
// L-value, then cleanup will have already been scheduled (and
// the `datum.to_rvalue_datum` call below will emit code to zero
// the drop flag when moving out of the L-value). If we are an
// R-value, then we do not need to schedule cleanup.
let source_datum = unpack_datum!(bcx,
datum.to_rvalue_datum(bcx, "__coerce_source"));
let target = bcx.monomorphize(&target);
let llty = type_of::type_of(bcx.ccx(), target);
// HACK(eddyb) get around issues with lifetime intrinsics.
let scratch = alloca_no_lifetime(bcx, llty, "__coerce_target");
let target_datum = Datum::new(scratch, target,
Rvalue::new(ByRef));
bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
datum = Datum::new(scratch, target,
RvalueExpr(Rvalue::new(ByRef)));
}
}
}
debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
DatumBlock::new(bcx, datum)
}
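/// Emits the data movement for an unsizing coercion from `source` into the
/// by-ref `target`. Illustrative example: for `Box<[i32; 4]> -> Box<[i32]>`
/// the data pointer is stored alongside the constant length `4`; for structs
/// whose last field is unsized, the coercion recurses into that field and
/// the remaining fields are memcpy'd unchanged.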
fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
span: codemap::Span,
source: Datum<'tcx, Rvalue>,
target: Datum<'tcx, Rvalue>)
-> Block<'blk, 'tcx> {
let mut bcx = bcx;
debug!("coerce_unsized({} -> {})",
source.to_string(bcx.ccx()),
target.to_string(bcx.ccx()));
match (&source.ty.sty, &target.ty.sty) {
(&ty::TyBox(a), &ty::TyBox(b)) |
(&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
&ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
(&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
&ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
(&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
&ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
let (inner_source, inner_target) = (a, b);
let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
// Normally, the source is a thin pointer and we are
// adding extra info to make a fat pointer. The exception
// is when we are upcasting an existing object fat pointer
// to use a different vtable. In that case, we want to
// load out the original data pointer so we can repackage
// it.
(Load(bcx, get_dataptr(bcx, source.val)),
Some(Load(bcx, get_len(bcx, source.val))))
} else {
let val = if source.kind.is_by_ref() {
load_ty(bcx, source.val, source.ty)
} else {
source.val
};
(val, None)
};
let info = unsized_info(bcx.ccx(), inner_source, inner_target,
old_info, bcx.fcx.param_substs);
// Compute the base pointer. This doesn't change the pointer value,
// but merely its type.
let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
let base = PointerCast(bcx, base, ptr_ty);
Store(bcx, base, get_dataptr(bcx, target.val));
Store(bcx, info, get_len(bcx, target.val));
}
// This can be extended to enums and tuples in the future.
// (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
(&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
assert_eq!(def_id_a, def_id_b);
// The target is already by-ref because it's to be written to.
let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
assert!(target.kind.is_by_ref());
let trait_substs = Substs::erased(VecPerParamSpace::new(vec![target.ty],
vec![source.ty],
Vec::new()));
let trait_ref = ty::Binder(ty::TraitRef {
def_id: langcall(bcx, Some(span), "coercion",
CoerceUnsizedTraitLangItem),
substs: bcx.tcx().mk_substs(trait_substs)
});
let kind = match fulfill_obligation(bcx.ccx(), span, trait_ref) {
traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
bcx.tcx().custom_coerce_unsized_kind(impl_def_id)
}
vtable => {
bcx.sess().span_bug(span, &format!("invalid CoerceUnsized vtable: {:?}",
vtable));
}
};
let repr_source = adt::represent_type(bcx.ccx(), source.ty);
let src_fields = match &*repr_source {
&adt::Repr::Univariant(ref s, _) => &s.fields,
_ => bcx.sess().span_bug(span,
&format!("Non univariant struct? (repr_source: {:?})",
repr_source)),
};
let repr_target = adt::represent_type(bcx.ccx(), target.ty);
let target_fields = match &*repr_target {
&adt::Repr::Univariant(ref s, _) => &s.fields,
_ => bcx.sess().span_bug(span,
&format!("Non univariant struct? (repr_target: {:?})",
repr_target)),
};
let coerce_index = match kind {
ty::CustomCoerceUnsized::Struct(i) => i
};
assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());
let iter = src_fields.iter().zip(target_fields).enumerate();
for (i, (src_ty, target_ty)) in iter {
let ll_source = adt::trans_field_ptr(bcx, &repr_source, source.val, 0, i);
let ll_target = adt::trans_field_ptr(bcx, &repr_target, target.val, 0, i);
// If this is the field we need to coerce, recurse on it.
if i == coerce_index {
coerce_unsized(bcx, span,
Datum::new(ll_source, src_ty,
Rvalue::new(ByRef)),
Datum::new(ll_target, target_ty,
Rvalue::new(ByRef)));
} else {
// Otherwise, simply copy the data from the source.
assert_eq!(src_ty, target_ty);
memcpy_ty(bcx, ll_target, ll_source, src_ty);
}
}
}
_ => bcx.sess().bug(&format!("coerce_unsized: invalid coercion {:?} -> {:?}",
source.ty,
target.ty))
}
bcx
}
/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
/// that the expr represents.
///
/// If this expression is an rvalue, this implies introducing a temporary. In other words,
/// something like `x().f` is translated into roughly the equivalent of
///
/// { tmp = x(); tmp.f }
pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
name: &str)
-> DatumBlock<'blk, 'tcx, Lvalue> {
let mut bcx = bcx;
let datum = unpack_datum!(bcx, trans(bcx, expr));
return datum.to_lvalue_datum(bcx, name, expr.id);
}
/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
/// directly.
fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
debug!("trans_unadjusted(expr={:?})", expr);
let _indenter = indenter();
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
return match expr_kind(bcx.tcx(), expr) {
ExprKind::Lvalue | ExprKind::RvalueDatum => {
let datum = unpack_datum!(bcx, {
trans_datum_unadjusted(bcx, expr)
});
DatumBlock {bcx: bcx, datum: datum}
}
ExprKind::RvalueStmt => {
bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
nil(bcx, expr_ty(bcx, expr))
}
ExprKind::RvalueDps => {
let ty = expr_ty(bcx, expr);
if type_is_zero_size(bcx.ccx(), ty) {
bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
nil(bcx, ty)
} else {
let scratch = rvalue_scratch_datum(bcx, ty, "");
bcx = trans_rvalue_dps_unadjusted(
bcx, expr, SaveIn(scratch.val));
// Note: this is not obviously a good idea. It causes
// immediate values to be loaded immediately after a
// return from a call or other similar expression,
// which in turn leads to alloca's having shorter
// lifetimes and hence larger stack frames. However,
        // it can also lead to more register pressure.
// Still, in practice it seems to increase
// performance, since we have fewer problems with
// morestack churn.
let scratch = unpack_datum!(
bcx, scratch.to_appropriate_datum(bcx));
DatumBlock::new(bcx, scratch.to_expr_datum())
}
}
};
fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
-> DatumBlock<'blk, 'tcx, Expr> {
let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
let datum = immediate_rvalue(llval, ty);
DatumBlock::new(bcx, datum.to_expr_datum())
}
}
fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let fcx = bcx.fcx;
let _icx = push_ctxt("trans_datum_unadjusted");
match expr.node {
ast::ExprParen(ref e) => {
trans(bcx, &**e)
}
ast::ExprPath(..) => {
trans_def(bcx, expr, bcx.def(expr.id))
}
ast::ExprField(ref base, ident) => {
trans_rec_field(bcx, &**base, ident.node.name)
}
ast::ExprTupField(ref base, idx) => {
trans_rec_tup_field(bcx, &**base, idx.node)
}
ast::ExprIndex(ref base, ref idx) => {
trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
}
ast::ExprBox(_, ref contents) => {
// Special case for `Box<T>`
let box_ty = expr_ty(bcx, expr);
let contents_ty = expr_ty(bcx, &**contents);
match box_ty.sty {
ty::TyBox(..) => {
trans_uniq_expr(bcx, expr, box_ty, &**contents, contents_ty)
}
_ => bcx.sess().span_bug(expr.span,
"expected unique box")
}
}
ast::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &**lit),
ast::ExprBinary(op, ref lhs, ref rhs) => {
trans_binary(bcx, expr, op, &**lhs, &**rhs)
}
ast::ExprUnary(op, ref x) => {
trans_unary(bcx, expr, op, &**x)
}
ast::ExprAddrOf(_, ref x) => {
match x.node {
ast::ExprRepeat(..) | ast::ExprVec(..) => {
// Special case for slices.
let cleanup_debug_loc =
debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
x.id,
x.span,
false);
fcx.push_ast_cleanup_scope(cleanup_debug_loc);
let datum = unpack_datum!(
bcx, tvec::trans_slice_vec(bcx, expr, &**x));
bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
DatumBlock::new(bcx, datum)
}
_ => {
trans_addr_of(bcx, expr, &**x)
}
}
}
ast::ExprCast(ref val, _) => {
// Datum output mode means this is a scalar cast:
trans_imm_cast(bcx, &**val, expr.id)
}
_ => {
bcx.tcx().sess.span_bug(
expr.span,
&format!("trans_rvalue_datum_unadjusted reached \
fall-through case: {:?}",
expr.node));
}
}
}
fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
base: &ast::Expr,
get_idx: F)
-> DatumBlock<'blk, 'tcx, Expr> where
F: FnOnce(&'blk ty::ctxt<'tcx>, &[ty::Field<'tcx>]) -> usize,
{
let mut bcx = bcx;
let _icx = push_ctxt("trans_rec_field");
let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
let bare_ty = base_datum.ty;
let repr = adt::represent_type(bcx.ccx(), bare_ty);
with_field_tys(bcx.tcx(), bare_ty, None, move |discr, field_tys| {
let ix = get_idx(bcx.tcx(), field_tys);
let d = base_datum.get_element(
bcx,
field_tys[ix].mt.ty,
|srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, ix));
if type_is_sized(bcx.tcx(), d.ty) {
DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
} else {
let scratch = rvalue_scratch_datum(bcx, d.ty, "");
Store(bcx, d.val, get_dataptr(bcx, scratch.val));
let info = Load(bcx, get_len(bcx, base_datum.val));
Store(bcx, info, get_len(bcx, scratch.val));
// Always generate an lvalue datum, because this pointer doesn't own
// the data and cleanup is scheduled elsewhere.
DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr))
}
})
}
/// Translates `base.field`.
fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
base: &ast::Expr,
field: ast::Name)
-> DatumBlock<'blk, 'tcx, Expr> {
trans_field(bcx, base, |tcx, field_tys| tcx.field_idx_strict(field, field_tys))
}
/// Translates `base.<idx>`.
fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
base: &ast::Expr,
idx: usize)
-> DatumBlock<'blk, 'tcx, Expr> {
trans_field(bcx, base, |_, _| idx)
}
fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
index_expr: &ast::Expr,
base: &ast::Expr,
idx: &ast::Expr,
method_call: MethodCall)
-> DatumBlock<'blk, 'tcx, Expr> {
//! Translates `base[idx]`.
let _icx = push_ctxt("trans_index");
let ccx = bcx.ccx();
let mut bcx = bcx;
let index_expr_debug_loc = index_expr.debug_loc();
// Check for overloaded index.
let method_ty = ccx.tcx()
.tables
.borrow()
.method_map
.get(&method_call)
.map(|method| method.ty);
let elt_datum = match method_ty {
Some(method_ty) => {
let method_ty = monomorphize_type(bcx, method_ty);
let base_datum = unpack_datum!(bcx, trans(bcx, base));
// Translate index expression.
let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
let ref_ty = // invoked methods have LB regions instantiated:
bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
let elt_ty = match ref_ty.builtin_deref(true) {
None => {
bcx.tcx().sess.span_bug(index_expr.span,
"index method didn't return a \
dereferenceable type?!")
}
Some(elt_tm) => elt_tm.ty,
};
// Overloaded. Evaluate `trans_overloaded_op`, which will
// invoke the user's index() method, which basically yields
// a `&T` pointer. We can then proceed down the normal
// path (below) to dereference that `&T`.
let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
unpack_result!(bcx,
trans_overloaded_op(bcx,
index_expr,
method_call,
base_datum,
Some((ix_datum, idx.id)),
Some(SaveIn(scratch.val)),
false));
let datum = scratch.to_expr_datum();
if type_is_sized(bcx.tcx(), elt_ty) {
Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr)
} else {
Datum::new(datum.val, elt_ty, LvalueExpr)
}
}
None => {
let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
base,
"index"));
// Translate index expression and cast to a suitable LLVM integer.
// Rust is less strict than LLVM in this regard.
let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
let ix_val = ix_datum.to_llscalarish(bcx);
let ix_size = machine::llbitsize_of_real(bcx.ccx(),
val_ty(ix_val));
let int_size = machine::llbitsize_of_real(bcx.ccx(),
ccx.int_type());
let ix_val = {
if ix_size < int_size {
if expr_ty(bcx, idx).is_signed() {
SExt(bcx, ix_val, ccx.int_type())
} else { ZExt(bcx, ix_val, ccx.int_type()) }
} else if ix_size > int_size {
Trunc(bcx, ix_val, ccx.int_type())
} else {
ix_val
}
};
let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());
let (base, len) = base_datum.get_vec_base_and_len(bcx);
debug!("trans_index: base {}", bcx.val_to_string(base));
debug!("trans_index: len {}", bcx.val_to_string(len));
let bounds_check = ICmp(bcx,
llvm::IntUGE,
ix_val,
len,
index_expr_debug_loc);
let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
let expected = Call(bcx,
expect,
&[bounds_check, C_bool(ccx, false)],
None,
index_expr_debug_loc);
bcx = with_cond(bcx, expected, |bcx| {
controlflow::trans_fail_bounds_check(bcx,
expr_info(index_expr),
ix_val,
len)
});
let elt = InBoundsGEP(bcx, base, &[ix_val]);
let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
Datum::new(elt, unit_ty, LvalueExpr)
}
};
DatumBlock::new(bcx, elt_datum)
}
fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ref_expr: &ast::Expr,
def: def::Def)
-> DatumBlock<'blk, 'tcx, Expr> {
//! Translates a reference to a path.
let _icx = push_ctxt("trans_def_lvalue");
match def {
def::DefFn(..) | def::DefMethod(..) |
def::DefStruct(_) | def::DefVariant(..) => {
let datum = trans_def_fn_unadjusted(bcx.ccx(), ref_expr, def,
bcx.fcx.param_substs);
DatumBlock::new(bcx, datum.to_expr_datum())
}
def::DefStatic(did, _) => {
// There are two things that may happen here:
// 1) If the static item is defined in this crate, it will be
// translated using `get_item_val`, and we return a pointer to
// the result.
// 2) If the static item is defined in another crate then we add
// (or reuse) a declaration of an external global, and return a
// pointer to that.
let const_ty = expr_ty(bcx, ref_expr);
// For external constants, we don't inline.
let val = if did.krate == ast::LOCAL_CRATE {
// Case 1.
// The LLVM global has the type of its initializer,
// which may not be equal to the enum's type for
// non-C-like enums.
let val = base::get_item_val(bcx.ccx(), did.node);
let pty = type_of::type_of(bcx.ccx(), const_ty).ptr_to();
PointerCast(bcx, val, pty)
} else {
// Case 2.
base::get_extern_const(bcx.ccx(), did, const_ty)
};
DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr))
}
def::DefConst(_) => {
bcx.sess().span_bug(ref_expr.span,
"constant expression should not reach expr::trans_def")
}
_ => {
DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum())
}
}
}
fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr)
-> Block<'blk, 'tcx> {
let mut bcx = bcx;
let _icx = push_ctxt("trans_rvalue_stmt");
if bcx.unreachable.get() {
return bcx;
}
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
match expr.node {
ast::ExprParen(ref e) => {
trans_into(bcx, &**e, Ignore)
}
ast::ExprBreak(label_opt) => {
controlflow::trans_break(bcx, expr, label_opt)
}
ast::ExprAgain(label_opt) => {
controlflow::trans_cont(bcx, expr, label_opt)
}
ast::ExprRet(ref ex) => {
// Check to see if the return expression itself is reachable.
// This can occur when the inner expression contains a return
let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
cfg.node_is_reachable(expr.id)
} else {
true
};
if reachable {
controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
} else {
// If it's not reachable, just translate the inner expression
// directly. This avoids having to manage a return slot when
// it won't actually be used anyway.
if let &Some(ref x) = ex {
bcx = trans_into(bcx, &**x, Ignore);
}
// Mark the end of the block as unreachable. Once we get to
// a return expression, there's no more we should be doing
// after this.
Unreachable(bcx);
bcx
}
}
ast::ExprWhile(ref cond, ref body, _) => {
controlflow::trans_while(bcx, expr, &**cond, &**body)
}
ast::ExprLoop(ref body, _) => {
controlflow::trans_loop(bcx, expr, &**body)
}
ast::ExprAssign(ref dst, ref src) => {
let src_datum = unpack_datum!(bcx, trans(bcx, &**src));
let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &**dst, "assign"));
if bcx.fcx.type_needs_drop(dst_datum.ty) {
// If there are destructors involved, make sure we
            // are copying from an rvalue, since that cannot possibly
// alias an lvalue. We are concerned about code like:
//
// a = a
//
// but also
//
// a = a.b
//
// where e.g. a : Option<Foo> and a.b :
// Option<Foo>. In that case, freeing `a` before the
// assignment may also free `a.b`!
//
// We could avoid this intermediary with some analysis
// to determine whether `dst` may possibly own `src`.
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
let src_datum = unpack_datum!(
bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
bcx = glue::drop_ty(bcx,
dst_datum.val,
dst_datum.ty,
expr.debug_loc());
src_datum.store_to(bcx, dst_datum.val)
} else {
src_datum.store_to(bcx, dst_datum.val)
}
}
ast::ExprAssignOp(op, ref dst, ref src) => {
trans_assign_op(bcx, expr, op, &**dst, &**src)
}
ast::ExprInlineAsm(ref a) => {
asm::trans_inline_asm(bcx, a)
}
_ => {
bcx.tcx().sess.span_bug(
expr.span,
&format!("trans_rvalue_stmt_unadjusted reached \
fall-through case: {:?}",
expr.node));
}
}
}
fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
dest: Dest)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
let mut bcx = bcx;
let tcx = bcx.tcx();
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
match expr.node {
ast::ExprParen(ref e) => {
trans_into(bcx, &**e, dest)
}
ast::ExprPath(..) => {
trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
}
ast::ExprIf(ref cond, ref thn, ref els) => {
controlflow::trans_if(bcx, expr.id, &**cond, &**thn, els.as_ref().map(|e| &**e), dest)
}
ast::ExprMatch(ref discr, ref arms, _) => {
_match::trans_match(bcx, expr, &**discr, &arms[..], dest)
}
ast::ExprBlock(ref blk) => {
controlflow::trans_block(bcx, &**blk, dest)
}
ast::ExprStruct(_, ref fields, ref base) => {
trans_struct(bcx,
&fields[..],
base.as_ref().map(|e| &**e),
expr.span,
expr.id,
node_id_type(bcx, expr.id),
dest)
}
ast::ExprRange(ref start, ref end) => {
// FIXME it is just not right that we are synthesising ast nodes in
// trans. Shudder.
fn make_field(field_name: &str, expr: P<ast::Expr>) -> ast::Field {
ast::Field {
ident: codemap::dummy_spanned(token::str_to_ident(field_name)),
expr: expr,
span: codemap::DUMMY_SP,
}
}
// A range just desugars into a struct.
// Note that the type of the start and end may not be the same, but
// they should only differ in their lifetime, which should not matter
// in trans.
let (did, fields, ty_params) = match (start, end) {
(&Some(ref start), &Some(ref end)) => {
// Desugar to Range
let fields = vec![make_field("start", start.clone()),
make_field("end", end.clone())];
(tcx.lang_items.range_struct(), fields, vec![node_id_type(bcx, start.id)])
}
(&Some(ref start), &None) => {
// Desugar to RangeFrom
let fields = vec![make_field("start", start.clone())];
(tcx.lang_items.range_from_struct(), fields, vec![node_id_type(bcx, start.id)])
}
(&None, &Some(ref end)) => {
// Desugar to RangeTo
let fields = vec![make_field("end", end.clone())];
(tcx.lang_items.range_to_struct(), fields, vec![node_id_type(bcx, end.id)])
}
_ => {
// Desugar to RangeFull
(tcx.lang_items.range_full_struct(), vec![], vec![])
}
};
if let Some(did) = did {
let substs = Substs::new_type(ty_params, vec![]);
trans_struct(bcx,
&fields,
None,
expr.span,
expr.id,
tcx.mk_struct(did, tcx.mk_substs(substs)),
dest)
} else {
tcx.sess.span_bug(expr.span,
"No lang item for ranges (how did we get this far?)")
}
}
ast::ExprTup(ref args) => {
let numbered_fields: Vec<(usize, &ast::Expr)> =
args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
trans_adt(bcx,
expr_ty(bcx, expr),
0,
&numbered_fields[..],
None,
dest,
expr.debug_loc())
}
ast::ExprLit(ref lit) => {
match lit.node {
ast::LitStr(ref s, _) => {
tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
}
_ => {
bcx.tcx()
.sess
.span_bug(expr.span,
"trans_rvalue_dps_unadjusted shouldn't be \
translating this type of literal")
}
}
}
ast::ExprVec(..) | ast::ExprRepeat(..) => {
tvec::trans_fixed_vstore(bcx, expr, dest)
}
ast::ExprClosure(_, ref decl, ref body) => {
let dest = match dest {
SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
Ignore => closure::Dest::Ignore(bcx.ccx())
};
closure::trans_closure_expr(dest, decl, body, expr.id, bcx.fcx.param_substs)
.unwrap_or(bcx)
}
ast::ExprCall(ref f, ref args) => {
if bcx.tcx().is_method_call(expr.id) {
trans_overloaded_call(bcx,
expr,
&**f,
&args[..],
Some(dest))
} else {
callee::trans_call(bcx,
expr,
&**f,
callee::ArgExprs(&args[..]),
dest)
}
}
ast::ExprMethodCall(_, _, ref args) => {
callee::trans_method_call(bcx,
expr,
&*args[0],
callee::ArgExprs(&args[..]),
dest)
}
ast::ExprBinary(op, ref lhs, ref rhs) => {
// if not overloaded, would be RvalueDatumExpr
let lhs = unpack_datum!(bcx, trans(bcx, &**lhs));
let rhs_datum = unpack_datum!(bcx, trans(bcx, &**rhs));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
Some((rhs_datum, rhs.id)), Some(dest),
!ast_util::is_by_value_binop(op.node)).bcx
}
ast::ExprUnary(op, ref subexpr) => {
// if not overloaded, would be RvalueDatumExpr
let arg = unpack_datum!(bcx, trans(bcx, &**subexpr));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
arg, None, Some(dest), !ast_util::is_by_value_unop(op)).bcx
}
ast::ExprIndex(ref base, ref idx) => {
// if not overloaded, would be RvalueDatumExpr
let base = unpack_datum!(bcx, trans(bcx, &**base));
let idx_datum = unpack_datum!(bcx, trans(bcx, &**idx));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
Some((idx_datum, idx.id)), Some(dest), true).bcx
}
ast::ExprCast(..) => {
// Trait casts used to come this way, now they should be coercions.
bcx.tcx().sess.span_bug(expr.span, "DPS expr_cast (residual trait cast?)")
}
ast::ExprAssignOp(op, ref dst, ref src) => {
trans_assign_op(bcx, expr, op, &**dst, &**src)
}
_ => {
bcx.tcx().sess.span_bug(
expr.span,
&format!("trans_rvalue_dps_unadjusted reached fall-through \
case: {:?}",
expr.node));
}
}
}
fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ref_expr: &ast::Expr,
def: def::Def,
dest: Dest)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_def_dps_unadjusted");
let lldest = match dest {
SaveIn(lldest) => lldest,
Ignore => { return bcx; }
};
match def {
def::DefVariant(tid, vid, _) => {
let variant_info = bcx.tcx().enum_variant_with_id(tid, vid);
if !variant_info.args.is_empty() {
// N-ary variant.
let llfn = callee::trans_fn_ref(bcx.ccx(), vid,
ExprId(ref_expr.id),
bcx.fcx.param_substs).val;
Store(bcx, llfn, lldest);
return bcx;
} else {
// Nullary variant.
let ty = expr_ty(bcx, ref_expr);
let repr = adt::represent_type(bcx.ccx(), ty);
adt::trans_set_discr(bcx, &*repr, lldest,
variant_info.disr_val);
return bcx;
}
}
def::DefStruct(_) => {
let ty = expr_ty(bcx, ref_expr);
match ty.sty {
ty::TyStruct(did, _) if bcx.tcx().has_dtor(did) => {
let repr = adt::represent_type(bcx.ccx(), ty);
adt::trans_set_discr(bcx, &*repr, lldest, 0);
}
_ => {}
}
bcx
}
_ => {
bcx.tcx().sess.span_bug(ref_expr.span, &format!(
"Non-DPS def {:?} referened by {}",
def, bcx.node_id_to_string(ref_expr.id)));
}
}
}
pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ref_expr: &ast::Expr,
def: def::Def,
param_substs: &'tcx Substs<'tcx>)
-> Datum<'tcx, Rvalue> {
let _icx = push_ctxt("trans_def_datum_unadjusted");
match def {
def::DefFn(did, _) |
def::DefStruct(did) | def::DefVariant(_, did, _) |
def::DefMethod(did, def::FromImpl(_)) => {
callee::trans_fn_ref(ccx, did, ExprId(ref_expr.id), param_substs)
}
def::DefMethod(impl_did, def::FromTrait(trait_did)) => {
meth::trans_static_method_callee(ccx, impl_did,
trait_did, ref_expr.id,
param_substs)
}
_ => {
ccx.tcx().sess.span_bug(ref_expr.span, &format!(
"trans_def_fn_unadjusted invoked on: {:?} for {:?}",
def,
ref_expr));
}
}
}
/// Translates a reference to a local variable or argument. This always results in an lvalue datum.
pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
def: def::Def)
-> Datum<'tcx, Lvalue> {
let _icx = push_ctxt("trans_local_var");
match def {
def::DefUpvar(nid, _) => {
// Can't move upvars, so this is never a ZeroMemLastUse.
let local_ty = node_id_type(bcx, nid);
match bcx.fcx.llupvars.borrow().get(&nid) {
Some(&val) => Datum::new(val, local_ty, Lvalue),
None => {
bcx.sess().bug(&format!(
"trans_local_var: no llval for upvar {} found",
nid));
}
}
}
def::DefLocal(nid) => {
let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
Some(&v) => v,
None => {
bcx.sess().bug(&format!(
"trans_local_var: no datum for local/arg {} found",
nid));
}
};
debug!("take_local(nid={}, v={}, ty={})",
nid, bcx.val_to_string(datum.val), datum.ty);
datum
}
_ => {
bcx.sess().unimpl(&format!(
"unsupported def type in trans_local_var: {:?}",
def));
}
}
}
/// Helper for enumerating the field types of structs, enums, or records. The optional node ID here
/// is the node ID of the path identifying the enum variant in use. If none, this cannot possibly
/// be an enum variant (so, if it is and `node_id_opt` is none, this function panics).
pub fn with_field_tys<'tcx, R, F>(tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
node_id_opt: Option<ast::NodeId>,
op: F)
-> R where
F: FnOnce(ty::Disr, &[ty::Field<'tcx>]) -> R,
{
match ty.sty {
ty::TyStruct(did, substs) => {
let fields = tcx.struct_fields(did, substs);
let fields = monomorphize::normalize_associated_type(tcx, &fields);
op(0, &fields[..])
}
ty::TyTuple(ref v) => {
let fields: Vec<_> = v.iter().enumerate().map(|(i, &f)| {
ty::Field {
name: token::intern(&i.to_string()),
mt: ty::TypeAndMut {
ty: f,
mutbl: ast::MutImmutable
}
}
}).collect();
op(0, &fields)
}
ty::TyEnum(_, substs) => {
// We want the *variant* ID here, not the enum ID.
match node_id_opt {
None => {
tcx.sess.bug(&format!(
"cannot get field types from the enum type {:?} \
without a node ID",
ty));
}
Some(node_id) => {
let def = tcx.def_map.borrow().get(&node_id).unwrap().full_def();
match def {
def::DefVariant(enum_id, variant_id, _) => {
let variant_info = tcx.enum_variant_with_id(enum_id, variant_id);
let fields = tcx.struct_fields(variant_id, substs);
let fields = monomorphize::normalize_associated_type(tcx, &fields);
op(variant_info.disr_val, &fields[..])
}
_ => {
tcx.sess.bug("resolve didn't map this expr to a \
variant ID")
}
}
}
}
}
_ => {
tcx.sess.bug(&format!(
"cannot get field types from the type {:?}",
ty));
}
}
}
fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
fields: &[ast::Field],
base: Option<&ast::Expr>,
expr_span: codemap::Span,
expr_id: ast::NodeId,
ty: Ty<'tcx>,
dest: Dest) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_rec");
let tcx = bcx.tcx();
with_field_tys(tcx, ty, Some(expr_id), |discr, field_tys| {
let mut need_base = vec![true; field_tys.len()];
let numbered_fields = fields.iter().map(|field| {
let opt_pos =
field_tys.iter().position(|field_ty|
field_ty.name == field.ident.node.name);
let result = match opt_pos {
Some(i) => {
need_base[i] = false;
(i, &*field.expr)
}
None => {
tcx.sess.span_bug(field.span,
"Couldn't find field in struct type")
}
};
result
}).collect::<Vec<_>>();
let optbase = match base {
Some(base_expr) => {
let mut leftovers = Vec::new();
for (i, b) in need_base.iter().enumerate() {
if *b {
leftovers.push((i, field_tys[i].mt.ty));
}
}
Some(StructBaseInfo {expr: base_expr,
fields: leftovers })
}
None => {
if need_base.iter().any(|b| *b) {
tcx.sess.span_bug(expr_span, "missing fields and no base expr")
}
None
}
};
trans_adt(bcx,
ty,
discr,
&numbered_fields,
optbase,
dest,
DebugLoc::At(expr_id, expr_span))
})
}
/// Information that `trans_adt` needs in order to fill in the fields
/// of a struct copied from a base struct (e.g., from an expression
/// like `Foo { a: b, ..base }`.
///
/// Note that `fields` may be empty; the base expression must always be
/// evaluated for side-effects.
pub struct StructBaseInfo<'a, 'tcx> {
/// The base expression; will be evaluated after all explicit fields.
expr: &'a ast::Expr,
/// The indices of fields to copy paired with their types.
fields: Vec<(usize, Ty<'tcx>)>
}
/// Constructs an ADT instance:
///
/// - `fields` should be a list of field indices paired with the
/// expression to store into that field. The initializers will be
/// evaluated in the order specified by `fields`.
///
/// - `optbase` contains information on the base struct (if any) from
/// which remaining fields are copied; see comments on `StructBaseInfo`.
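///
/// Evaluation order matters for functional record update: all explicit field
/// initializers are evaluated before the base expression. E.g. (illustrative)
/// in `Foo { a: f(), b: g(), ..h() }`, `f()` and `g()` run first, then `h()`,
/// and only afterwards are the scratch values moved into place.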
pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
ty: Ty<'tcx>,
discr: ty::Disr,
fields: &[(usize, &ast::Expr)],
optbase: Option<StructBaseInfo<'a, 'tcx>>,
dest: Dest,
debug_location: DebugLoc)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_adt");
let fcx = bcx.fcx;
let repr = adt::represent_type(bcx.ccx(), ty);
debug_location.apply(bcx.fcx);
// If we don't care about the result, just make a
// temporary stack slot
let addr = match dest {
SaveIn(pos) => pos,
Ignore => alloc_ty(bcx, ty, "temp"),
};
// This scope holds intermediates that must be cleaned should
// panic occur before the ADT as a whole is ready.
let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
if ty.is_simd(bcx.tcx()) {
        // Issue 23112: The original logic appeared vulnerable to the same
        // order-of-eval bug. But SIMD values are tuple-structs;
// i.e. functional record update (FRU) syntax is unavailable.
//
// To be safe, double-check that we did not get here via FRU.
assert!(optbase.is_none());
// This is the constructor of a SIMD type, such types are
// always primitive machine types and so do not have a
// destructor or require any clean-up.
let llty = type_of::type_of(bcx.ccx(), ty);
        // Keep the vector in a register, running through the fields and
        // `insertelement`ing each value directly into that register
        // (i.e. avoiding GEPi and `store`s to an alloca).
let mut vec_val = C_undef(llty);
for &(i, ref e) in fields {
let block_datum = trans(bcx, &**e);
bcx = block_datum.bcx;
let position = C_uint(bcx.ccx(), i);
let value = block_datum.datum.to_llscalarish(bcx);
vec_val = InsertElement(bcx, vec_val, value, position);
}
Store(bcx, vec_val, addr);
} else if let Some(base) = optbase {
// Issue 23112: If there is a base, then order-of-eval
// requires field expressions eval'ed before base expression.
// First, trans field expressions to temporary scratch values.
let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
let datum = unpack_datum!(bcx, trans(bcx, &**e));
(i, datum)
}).collect();
debug_location.apply(bcx.fcx);
// Second, trans the base to the dest.
assert_eq!(discr, 0);
match expr_kind(bcx.tcx(), &*base.expr) {
ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
bcx = trans_into(bcx, &*base.expr, SaveIn(addr));
},
ExprKind::RvalueStmt => {
bcx.tcx().sess.bug("unexpected expr kind for struct base expr")
}
_ => {
let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base"));
for &(i, t) in &base.fields {
let datum = base_datum.get_element(
bcx, t, |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, i));
assert!(type_is_sized(bcx.tcx(), datum.ty));
let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
bcx = datum.store_to(bcx, dest);
}
}
}
// Finally, move scratch field values into actual field locations
for (i, datum) in scratch_vals {
let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
bcx = datum.store_to(bcx, dest);
}
} else {
// No base means we can write all fields directly in place.
for &(i, ref e) in fields {
let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
let e_ty = expr_ty_adjusted(bcx, &**e);
bcx = trans_into(bcx, &**e, SaveIn(dest));
let scope = cleanup::CustomScope(custom_cleanup_scope);
fcx.schedule_lifetime_end(scope, dest);
fcx.schedule_drop_mem(scope, dest, e_ty);
}
}
adt::trans_set_discr(bcx, &*repr, addr, discr);
fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
// If we don't care about the result drop the temporary we made
match dest {
SaveIn(_) => bcx,
Ignore => {
bcx = glue::drop_ty(bcx, addr, ty, debug_location);
base::call_lifetime_end(bcx, addr);
bcx
}
}
}
fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
lit: &ast::Lit)
-> DatumBlock<'blk, 'tcx, Expr> {
    // must not be a string constant; that is an RvalueDpsExpr
let _icx = push_ctxt("trans_immediate_lit");
let ty = expr_ty(bcx, expr);
let v = consts::const_lit(bcx.ccx(), expr, lit);
immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
}
fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
op: ast::UnOp,
sub_expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let ccx = bcx.ccx();
let mut bcx = bcx;
let _icx = push_ctxt("trans_unary_datum");
let method_call = MethodCall::expr(expr.id);
// The only overloaded operator that is translated to a datum
    // is an overloaded deref, since it always yields a `&T`.
// Otherwise, we should be in the RvalueDpsExpr path.
assert!(op == ast::UnDeref || !ccx.tcx().is_method_call(expr.id));
let un_ty = expr_ty(bcx, expr);
let debug_loc = expr.debug_loc();
match op {
ast::UnNot => {
let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
}
ast::UnNeg => {
let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
let val = datum.to_llscalarish(bcx);
let (bcx, llneg) = {
if un_ty.is_fp() {
let result = FNeg(bcx, val, debug_loc);
(bcx, result)
} else {
let is_signed = un_ty.is_signed();
let result = Neg(bcx, val, debug_loc);
let bcx = if bcx.ccx().check_overflow() && is_signed {
let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
let is_min = ICmp(bcx, llvm::IntEQ, val,
C_integral(llty, min, true), debug_loc);
with_cond(bcx, is_min, |bcx| {
let msg = InternedString::new(
"attempted to negate with overflow");
controlflow::trans_fail(bcx, expr_info(expr), msg)
})
} else {
bcx
};
(bcx, result)
}
};
immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
}
ast::UnUniq => {
trans_uniq_expr(bcx, expr, un_ty, sub_expr, expr_ty(bcx, sub_expr))
}
ast::UnDeref => {
let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
deref_once(bcx, expr, datum, method_call)
}
}
}
fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
box_expr: &ast::Expr,
box_ty: Ty<'tcx>,
contents: &ast::Expr,
contents_ty: Ty<'tcx>)
-> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_uniq_expr");
let fcx = bcx.fcx;
assert!(type_is_sized(bcx.tcx(), contents_ty));
let llty = type_of::type_of(bcx.ccx(), contents_ty);
let size = llsize_of(bcx.ccx(), llty);
let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
let llty_ptr = llty.ptr_to();
let Result { bcx, val } = malloc_raw_dyn(bcx,
llty_ptr,
box_ty,
size,
align,
box_expr.debug_loc());
// Unique boxes do not allocate for zero-size types. The standard library
// may assume that `free` is never called on the pointer returned for
// `Box<ZeroSizeType>`.
let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
trans_into(bcx, contents, SaveIn(val))
} else {
let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
val, cleanup::HeapExchange, contents_ty);
let bcx = trans_into(bcx, contents, SaveIn(val));
fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
bcx
};
immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
}
fn ref_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lval: Datum<'tcx, Lvalue>)
-> DatumBlock<'blk, 'tcx, Expr> {
let dest_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), lval.ty);
let scratch = rvalue_scratch_datum(bcx, dest_ty, "__fat_ptr");
memcpy_ty(bcx, scratch.val, lval.val, scratch.ty);
DatumBlock::new(bcx, scratch.to_expr_datum())
}
fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
subexpr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_addr_of");
let mut bcx = bcx;
let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
if !type_is_sized(bcx.tcx(), sub_datum.ty) {
// DST lvalue, close to a fat pointer
ref_fat_ptr(bcx, sub_datum)
} else {
// Sized value, ref to a thin pointer
let ty = expr_ty(bcx, expr);
immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
}
}
// Important to get types for both lhs and rhs, because one might be _|_
// and the other not.
fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
binop_expr: &ast::Expr,
binop_ty: Ty<'tcx>,
op: ast::BinOp,
lhs_t: Ty<'tcx>,
lhs: ValueRef,
rhs_t: Ty<'tcx>,
rhs: ValueRef)
-> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_eager_binop");
let tcx = bcx.tcx();
let is_simd = lhs_t.is_simd(tcx);
let intype = if is_simd {
lhs_t.simd_type(tcx)
} else {
lhs_t
};
let is_float = intype.is_fp();
let is_signed = intype.is_signed();
let info = expr_info(binop_expr);
let binop_debug_loc = binop_expr.debug_loc();
let mut bcx = bcx;
let val = match op.node {
ast::BiAdd => {
if is_float {
FAdd(bcx, lhs, rhs, binop_debug_loc)
} else if is_simd {
Add(bcx, lhs, rhs, binop_debug_loc)
} else {
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
}
ast::BiSub => {
if is_float {
FSub(bcx, lhs, rhs, binop_debug_loc)
} else if is_simd {
Sub(bcx, lhs, rhs, binop_debug_loc)
} else {
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
}
ast::BiMul => {
if is_float {
FMul(bcx, lhs, rhs, binop_debug_loc)
} else if is_simd {
Mul(bcx, lhs, rhs, binop_debug_loc)
} else {
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
}
ast::BiDiv => {
if is_float {
FDiv(bcx, lhs, rhs, binop_debug_loc)
} else {
                // Only zero-check integers; fp division by zero gives inf/NaN, not a trap
bcx = base::fail_if_zero_or_overflows(bcx,
expr_info(binop_expr),
op,
lhs,
rhs,
rhs_t);
if is_signed {
SDiv(bcx, lhs, rhs, binop_debug_loc)
} else {
UDiv(bcx, lhs, rhs, binop_debug_loc)
}
}
}
ast::BiRem => {
if is_float {
FRem(bcx, lhs, rhs, binop_debug_loc)
} else {
// Only zero-check integers; fp %0 is NaN
bcx = base::fail_if_zero_or_overflows(bcx,
expr_info(binop_expr),
op, lhs, rhs, rhs_t);
if is_signed {
SRem(bcx, lhs, rhs, binop_debug_loc)
} else {
URem(bcx, lhs, rhs, binop_debug_loc)
}
}
}
ast::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
ast::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
ast::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
ast::BiShl => {
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
ast::BiShr => {
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
ast::BiEq | ast::BiNe | ast::BiLt | ast::BiGe | ast::BiLe | ast::BiGt => {
if is_simd {
base::compare_simd_types(bcx, lhs, rhs, intype, op.node, binop_debug_loc)
} else {
base::compare_scalar_types(bcx, lhs, rhs, intype, op.node, binop_debug_loc)
}
}
_ => {
bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
}
};
immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
}
// refinement types would obviate the need for this
enum lazy_binop_ty {
lazy_and,
lazy_or,
}
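/// Translates short-circuiting `&&` / `||`: the right-hand side is evaluated
/// only when the left-hand side does not already decide the result, and the
/// two control-flow edges are merged with an `i1` phi. E.g. for `a && b`
/// (illustrative), a false `a` branches straight to the join block carrying
/// `false`, while a true `a` falls through to evaluate `b`.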
fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
binop_expr: &ast::Expr,
op: lazy_binop_ty,
a: &ast::Expr,
b: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_lazy_binop");
let binop_ty = expr_ty(bcx, binop_expr);
let fcx = bcx.fcx;
let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
let lhs = lhs.to_llscalarish(past_lhs);
if past_lhs.unreachable.get() {
return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
}
let join = fcx.new_id_block("join", binop_expr.id);
let before_rhs = fcx.new_id_block("before_rhs", b.id);
match op {
lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
}
let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
let rhs = rhs.to_llscalarish(past_rhs);
if past_rhs.unreachable.get() {
return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
}
Br(past_rhs, join.llbb, DebugLoc::None);
let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
&[past_lhs.llbb, past_rhs.llbb]);
return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
}
fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
op: ast::BinOp,
lhs: &ast::Expr,
rhs: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_binary");
let ccx = bcx.ccx();
// if overloaded, would be RvalueDpsExpr
assert!(!ccx.tcx().is_method_call(expr.id));
match op.node {
ast::BiAnd => {
trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
}
ast::BiOr => {
trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
}
_ => {
let mut bcx = bcx;
let lhs_datum = unpack_datum!(bcx, trans(bcx, lhs));
let rhs_datum = unpack_datum!(bcx, trans(bcx, rhs));
let binop_ty = expr_ty(bcx, expr);
debug!("trans_binary (expr {}): lhs_datum={}",
expr.id,
lhs_datum.to_string(ccx));
let lhs_ty = lhs_datum.ty;
let lhs = lhs_datum.to_llscalarish(bcx);
debug!("trans_binary (expr {}): rhs_datum={}",
expr.id,
rhs_datum.to_string(ccx));
let rhs_ty = rhs_datum.ty;
let rhs = rhs_datum.to_llscalarish(bcx);
trans_eager_binop(bcx, expr, binop_ty, op,
lhs_ty, lhs, rhs_ty, rhs)
}
}
}
fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
method_call: MethodCall,
lhs: Datum<'tcx, Expr>,
rhs: Option<(Datum<'tcx, Expr>, ast::NodeId)>,
dest: Option<Dest>,
autoref: bool)
-> Result<'blk, 'tcx> {
callee::trans_call_inner(bcx,
expr.debug_loc(),
|bcx, arg_cleanup_scope| {
meth::trans_method_callee(bcx,
method_call,
None,
arg_cleanup_scope)
},
callee::ArgOverloadedOp(lhs, rhs, autoref),
dest)
}
fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
callee: &'a ast::Expr,
args: &'a [P<ast::Expr>],
dest: Option<Dest>)
-> Block<'blk, 'tcx> {
debug!("trans_overloaded_call {}", expr.id);
let method_call = MethodCall::expr(expr.id);
let mut all_args = vec!(callee);
all_args.extend(args.iter().map(|e| &**e));
unpack_result!(bcx,
callee::trans_call_inner(bcx,
expr.debug_loc(),
|bcx, arg_cleanup_scope| {
meth::trans_method_callee(
bcx,
method_call,
None,
arg_cleanup_scope)
},
callee::ArgOverloadedCall(all_args),
dest));
bcx
}
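/// Returns true when a cast needs no code at all because source and target
/// share a representation: coercion casts, and pointer-to-pointer casts whose
/// pointees are the same type (e.g., illustratively, `&T as *const T`).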
pub fn cast_is_noop<'tcx>(tcx: &ty::ctxt<'tcx>,
expr: &ast::Expr,
t_in: Ty<'tcx>,
t_out: Ty<'tcx>)
-> bool {
if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
return true;
}
match (t_in.builtin_deref(true), t_out.builtin_deref(true)) {
(Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
t_in == t_out
}
_ => {
// This condition isn't redundant with the check for CoercionCast:
// different types can be substituted into the same type, and
// == equality can be overconservative if there are regions.
t_in == t_out
}
}
}
fn
|
<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
id: ast::NodeId)
-> DatumBlock<'blk, 'tcx, Expr>
{
use middle::cast::CastTy::*;
use middle::cast::IntTy::*;
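    // int_cast picks the LLVM instruction for an integer-to-integer cast:
    // equal widths bitcast, narrowing truncates, and widening sign- or
    // zero-extends depending on the *source* signedness (illustratively,
    // `i8 as i32` sign-extends while `u8 as i32` zero-extends).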
fn int_cast(bcx: Block,
lldsttype: Type,
llsrctype: Type,
llsrc: ValueRef,
signed: bool)
-> ValueRef
{
let _icx = push_ctxt("int_cast");
let srcsz = llsrctype.int_width();
let dstsz = lldsttype.int_width();
return if dstsz == srcsz {
BitCast(bcx, llsrc, lldsttype)
} else if srcsz > dstsz {
TruncOrBitCast(bcx, llsrc, lldsttype)
} else if signed {
SExtOrBitCast(bcx, llsrc, lldsttype)
} else {
ZExtOrBitCast(bcx, llsrc, lldsttype)
}
}
fn float_cast(bcx: Block,
lldsttype: Type,
llsrctype: Type,
llsrc: ValueRef)
-> ValueRef
{
let _icx = push_ctxt("float_cast");
let srcsz = llsrctype.float_width();
let dstsz = lldsttype.float_width();
return if dstsz > srcsz {
FPExt(bcx, llsrc, lldsttype)
} else if srcsz > dstsz {
FPTrunc(bcx, llsrc, lldsttype)
} else { llsrc };
}
let _icx = push_ctxt("trans_cast");
let mut bcx = bcx;
let ccx = bcx.ccx();
let t_in = expr_ty_adjusted(bcx, expr);
let t_out = node_id_type(bcx, id);
debug!("trans_cast({:?} as {:?})", t_in, t_out);
let mut ll_t_in = type_of::arg_type_of(ccx, t_in);
let ll_t_out = type_of::arg_type_of(ccx, t_out);
// Convert the value to be cast into a ValueRef, either by-ref or
// by-value as appropriate given its type:
let mut datum = unpack_datum!(bcx, trans(bcx, expr));
let datum_ty = monomorphize_type(bcx, datum.ty);
if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
datum.ty = t_out;
return DatumBlock::new(bcx, datum);
}
if type_is_fat_ptr(bcx.tcx(), t_in) {
assert!(datum.kind.is_by_ref());
if type_is_fat_ptr(bcx.tcx(), t_out) {
return DatumBlock::new(bcx, Datum::new(
PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
t_out,
Rvalue::new(ByRef)
)).to_expr_datumblock();
} else {
// Return the address
return immediate_rvalue_bcx(bcx,
PointerCast(bcx,
Load(bcx, get_dataptr(bcx, datum.val)),
ll_t_out),
t_out).to_expr_datumblock();
}
}
let r_t_in = CastTy::from_ty(bcx.tcx(), t_in).expect("bad input type for cast");
let r_t_out = CastTy::from_ty(bcx.tcx(), t_out).expect("bad output type for cast");
let (llexpr, signed) = if let Int(CEnum) = r_t_in {
let repr = adt::represent_type(ccx, t_in);
let datum = unpack_datum!(
bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
let llexpr_ptr = datum.to_llref();
let discr = adt::trans_get_discr(bcx, &*repr, llexpr_ptr, Some(Type::i64(ccx)));
ll_t_in = val_ty(discr);
(discr, adt::is_discr_signed(&*repr))
} else {
(datum.to_llscalarish(bcx), t_in.is_signed())
};
let newval = match (r_t_in, r_t_out) {
(Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
PointerCast(bcx, llexpr, ll_t_out)
}
(Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
(Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
(Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
(Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
(Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
(Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
(Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
(Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
_ => ccx.sess().span_bug(expr.span,
&format!("translating unsupported cast: \
{:?} -> {:?}",
t_in,
t_out)
)
};
return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
}
fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
op: ast::BinOp,
dst: &ast::Expr,
src: &ast::Expr)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_assign_op");
let mut bcx = bcx;
debug!("trans_assign_op(expr={:?})", expr);
// User-defined operator methods cannot be used with `+=` etc right now
assert!(!bcx.tcx().is_method_call(expr.id));
// Evaluate LHS (destination), which should be an lvalue
let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
assert!(!bcx.fcx.type_needs_drop(dst_datum.ty));
let dst_ty = dst_datum.ty;
let dst = load_ty(bcx, dst_datum.val, dst_datum.ty);
// Evaluate RHS
let rhs_datum = unpack_datum!(bcx, trans(bcx, &*src));
let rhs_ty = rhs_datum.ty;
let rhs = rhs_datum.to_llscalarish(bcx);
// Perform computation and store the result
let result_datum = unpack_datum!(
bcx, trans_eager_binop(bcx, expr, dst_datum.ty, op,
dst_ty, dst, rhs_ty, rhs));
return result_datum.store_to(bcx, dst_datum.val);
}
fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
datum: Datum<'tcx, Expr>,
expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
// Ensure cleanup of `datum` if not already scheduled and obtain
// a "by ref" pointer.
let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
// Compute final type. Note that we are loose with the region and
// mutability, since those things don't matter in trans.
let referent_ty = lv_datum.ty;
let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);
// Get the pointer.
let llref = lv_datum.to_llref();
// Construct the resulting datum, using what was the "by ref"
// ValueRef of type `referent_ty` to be the "by value" ValueRef
// of type `&referent_ty`.
// Pointers to DST types are non-immediate, and therefore still use ByRef.
let kind = if type_is_sized(bcx.tcx(), referent_ty) { ByValue } else { ByRef };
DatumBlock::new(bcx, Datum::new(llref, ptr_ty, RvalueExpr(Rvalue::new(kind))))
}
fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>,
times: usize)
-> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let mut datum = datum;
for i in 0..times {
let method_call = MethodCall::autoderef(expr.id, i as u32);
datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
}
DatumBlock { bcx: bcx, datum: datum }
}
fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>,
method_call: MethodCall)
-> DatumBlock<'blk, 'tcx, Expr> {
let ccx = bcx.ccx();
debug!("deref_once(expr={:?}, datum={}, method_call={:?})",
expr,
datum.to_string(ccx),
method_call);
let mut bcx = bcx;
// Check for overloaded deref.
let method_ty = ccx.tcx()
.tables
.borrow()
.method_map
.get(&method_call).map(|method| method.ty);
let datum = match method_ty {
Some(method_ty) => {
let method_ty = monomorphize_type(bcx, method_ty);
// Overloaded. Evaluate `trans_overloaded_op`, which will
// invoke the user's deref() method, which basically
// converts from the `Smaht<T>` pointer that we have into
// a `&T` pointer. We can then proceed down the normal
// path (below) to dereference that `&T`.
let datum = if method_call.autoderef == 0 {
datum
} else {
// Always perform an AutoPtr when applying an overloaded auto-deref
unpack_datum!(bcx, auto_ref(bcx, datum, expr))
};
let ref_ty = // invoked methods have their LB regions instantiated
ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
datum, None, Some(SaveIn(scratch.val)),
false));
scratch.to_expr_datum()
}
None => {
// Not overloaded. We already have a pointer we know how to deref.
datum
}
};
let r = match datum.ty.sty {
ty::TyBox(content_ty) => {
// Make sure we have an lvalue datum here to get the
// proper cleanups scheduled
let datum = unpack_datum!(
bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
if type_is_sized(bcx.tcx(), content_ty) {
let ptr = load_ty(bcx, datum.val, datum.ty);
DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr))
} else {
// A fat pointer and a DST lvalue have the same representation
// just different types. Since there is no temporary for `*e`
// here (because it is unsized), we cannot emulate the sized
// object code path for running drop glue and free. Instead,
// we schedule cleanup for `e`, turning it into an lvalue.
let datum = Datum::new(datum.val, content_ty, LvalueExpr);
DatumBlock::new(bcx, datum)
}
}
ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
if type_is_sized(bcx.tcx(), content_ty) {
let ptr = datum.to_llscalarish(bcx);
// Always generate an lvalue datum, even if datum.mode is
// an rvalue. This is because datum.mode is only an
// rvalue for non-owning pointers like &T or *T, in which
// case cleanup *is* scheduled elsewhere, by the true
// owner (or, in the case of *T, by the user).
DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr))
} else {
// A fat pointer and a DST lvalue have the same representation
// just different types.
DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr))
}
}
_ => {
bcx.tcx().sess.span_bug(
expr.span,
&format!("deref invoked on expr of illegal type {:?}",
datum.ty));
}
};
debug!("deref_once(expr={}, method_call={:?}, result={})",
expr.id, method_call, r.datum.to_string(ccx));
return r;
}
#[derive(Debug)]
enum OverflowOp {
Add,
Sub,
Mul,
Shl,
Shr,
}
impl OverflowOp {
fn codegen_strategy(&self) -> OverflowCodegen {
use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
match *self {
OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
}
}
}
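// Two codegen strategies: Add/Sub/Mul go through LLVM's *.with.overflow
// intrinsics, while Shl/Shr validate the shift amount against the type's
// bit width before emitting the (masked) shift.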
enum OverflowCodegen {
ViaIntrinsic(OverflowOpViaIntrinsic),
ViaInputCheck(OverflowOpViaInputCheck),
}
enum OverflowOpViaInputCheck { Shl, Shr, }
#[derive(Debug)]
enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
impl OverflowOpViaIntrinsic {
fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
bcx.ccx().get_intrinsic(&name)
}
fn to_intrinsic_name(&self, tcx: &ty::ctxt, ty: Ty) -> &'static str {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
use middle::ty::{TyInt, TyUint};
let new_sty = match ty.sty {
TyInt(TyIs) => match &tcx.sess.target.target.target_pointer_width[..] {
"32" => TyInt(TyI32),
"64" => TyInt(TyI64),
_ => panic!("unsupported target word size")
},
TyUint(TyUs) => match &tcx.sess.target.target.target_pointer_width[..] {
"32" => TyUint(TyU32),
"64" => TyUint(TyU64),
_ => panic!("unsupported target word size")
},
ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
_ => panic!("tried to get overflow intrinsic for {:?} applied to non-int type",
*self)
};
match *self {
OverflowOpViaIntrinsic::Add => match new_sty {
TyInt(TyI8) => "llvm.sadd.with.overflow.i8",
TyInt(TyI16) => "llvm.sadd.with.overflow.i16",
TyInt(TyI32) => "llvm.sadd.with.overflow.i32",
TyInt(TyI64) => "llvm.sadd.with.overflow.i64",
TyUint(TyU8) => "llvm.uadd.with.overflow.i8",
TyUint(TyU16) => "llvm.uadd.with.overflow.i16",
TyUint(TyU32) => "llvm.uadd.with.overflow.i32",
TyUint(TyU64) => "llvm.uadd.with.overflow.i64",
_ => unreachable!(),
},
OverflowOpViaIntrinsic::Sub => match new_sty {
TyInt(TyI8) => "llvm.ssub.with.overflow.i8",
TyInt(TyI16) => "llvm.ssub.with.overflow.i16",
TyInt(TyI32) => "llvm.ssub.with.overflow.i32",
TyInt(TyI64) => "llvm.ssub.with.overflow.i64",
TyUint(TyU8) => "llvm.usub.with.overflow.i8",
TyUint(TyU16) => "llvm.usub.with.overflow.i16",
TyUint(TyU32) => "llvm.usub.with.overflow.i32",
TyUint(TyU64) => "llvm.usub.with.overflow.i64",
_ => unreachable!(),
},
OverflowOpViaIntrinsic::Mul => match new_sty {
TyInt(TyI8) => "llvm.smul.with.overflow.i8",
TyInt(TyI16) => "llvm.smul.with.overflow.i16",
TyInt(TyI32) => "llvm.smul.with.overflow.i32",
TyInt(TyI64) => "llvm.smul.with.overflow.i64",
TyUint(TyU8) => "llvm.umul.with.overflow.i8",
TyUint(TyU16) => "llvm.umul.with.overflow.i16",
TyUint(TyU32) => "llvm.umul.with.overflow.i32",
TyUint(TyU64) => "llvm.umul.with.overflow.i64",
_ => unreachable!(),
},
}
}
fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
info: NodeIdAndSpan,
lhs_t: Ty<'tcx>, lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc)
-> (Block<'blk, 'tcx>, ValueRef) {
let llfn = self.to_intrinsic(bcx, lhs_t);
let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
let result = ExtractValue(bcx, val, 0); // iN operation result
let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
binop_debug_loc);
let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
None, binop_debug_loc);
let bcx =
base::with_cond(bcx, cond, |bcx|
controlflow::trans_fail(bcx, info,
InternedString::new("arithmetic operation overflowed")));
(bcx, result)
}
}
impl OverflowOpViaInputCheck {
fn build_with_input_check<'blk, 'tcx>(&self,
bcx: Block<'blk, 'tcx>,
info: NodeIdAndSpan,
lhs_t: Ty<'tcx>,
lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc)
-> (Block<'blk, 'tcx>, ValueRef)
{
let lhs_llty = val_ty(lhs);
let rhs_llty = val_ty(rhs);
// Panic if any bits are set outside of bits that we always
// mask in.
//
// Note that the mask's value is derived from the LHS type
// (since that is where the 32/64 distinction is relevant) but
// the mask's type must match the RHS type (since they will
        // both be fed into an and-binop)
let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
let result = match *self {
OverflowOpViaInputCheck::Shl =>
build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
OverflowOpViaInputCheck::Shr =>
build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
};
let bcx =
base::with_cond(bcx, cond, |bcx|
controlflow::trans_fail(bcx, info,
InternedString::new("shift operation overflowed")));
(bcx, result)
}
}
fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
llty: Type,
mask_llty: Type,
invert: bool) -> ValueRef {
let kind = llty.kind();
match kind {
TypeKind::Integer => {
// i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
let val = llty.int_width() - 1;
if invert {
C_integral(mask_llty, !val, true)
} else {
C_integral(mask_llty, val, false)
}
},
TypeKind::Vector => {
let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
VectorSplat(bcx, mask_llty.vector_length(), mask)
},
_ => panic!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
}
}
// Check if an integer or vector contains a nonzero element.
fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
value: ValueRef,
binop_debug_loc: DebugLoc) -> ValueRef {
let llty = val_ty(value);
let kind = llty.kind();
match kind {
TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
TypeKind::Vector => {
// Check if any elements of the vector are nonzero by treating
// it as a wide integer and checking if the integer is nonzero.
let width = llty.vector_length() as u64 * llty.element_type().int_width();
let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
build_nonzero_check(bcx, int_value, binop_debug_loc)
},
_ => panic!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
}
}
// To avoid UB from LLVM, these two functions mask RHS with an
// appropriate mask unconditionally (i.e. the fallback behavior for
// all shifts). For 32- and 64-bit types, this matches the semantics
// of Java. (See related discussion on #1877 and #10183.)
fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc) -> ValueRef {
let rhs = base::cast_shift_expr_rhs(bcx, ast::BinOp_::BiShl, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
Shl(bcx, lhs, rhs, binop_debug_loc)
}
fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs_t: Ty<'tcx>,
lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc) -> ValueRef {
let rhs = base::cast_shift_expr_rhs(bcx, ast::BinOp_::BiShr, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
let tcx = bcx.tcx();
let is_simd = lhs_t.is_simd(tcx);
let intype = if is_simd {
lhs_t.simd_type(tcx)
} else {
lhs_t
};
let is_signed = intype.is_signed();
if is_signed {
AShr(bcx, lhs, rhs, binop_debug_loc)
} else {
LShr(bcx, lhs, rhs, binop_debug_loc)
}
}
fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
rhs: ValueRef,
debug_loc: DebugLoc) -> ValueRef {
let rhs_llty = val_ty(rhs);
And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
}
fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
lhs_t: Ty<'tcx>, lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc)
-> (Block<'blk, 'tcx>, ValueRef) {
if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
if bcx.ccx().check_overflow() {
match oop.codegen_strategy() {
OverflowCodegen::ViaIntrinsic(oop) =>
oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
OverflowCodegen::ViaInputCheck(oop) =>
oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
}
} else {
let res = match oop {
OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
OverflowOp::Shl =>
build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
OverflowOp::Shr =>
build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
};
(bcx, res)
}
}
/// We categorize expressions into three kinds. The distinction between
/// lvalue/rvalue is fundamental to the language. The distinction between the
/// two kinds of rvalues is an artifact of trans which reflects how we will
/// generate code for that kind of expression. See trans/expr.rs for more
/// information.
#[derive(Copy, Clone)]
enum ExprKind {
Lvalue,
RvalueDps,
RvalueDatum,
RvalueStmt
}
fn expr_kind(tcx: &ty::ctxt, expr: &ast::Expr) -> ExprKind {
if tcx.is_method_call(expr.id) {
// Overloaded operations are generally calls, and hence they are
// generated via DPS, but there are a few exceptions:
return match expr.node {
// `a += b` has a unit result.
ast::ExprAssignOp(..) => ExprKind::RvalueStmt,
// the deref method invoked for `*a` always yields an `&T`
ast::ExprUnary(ast::UnDeref, _) => ExprKind::Lvalue,
// the index method invoked for `a[i]` always yields an `&T`
ast::ExprIndex(..) => ExprKind::Lvalue,
// in the general case, result could be any type, use DPS
_ => ExprKind::RvalueDps
};
}
match expr.node {
ast::ExprPath(..) => {
match tcx.resolve_expr(expr) {
def::DefStruct(_) | def::DefVariant(..) => {
if let ty::TyBareFn(..) = tcx.node_id_to_type(expr.id).sty {
// ctor function
ExprKind::RvalueDatum
} else {
ExprKind::RvalueDps
}
}
            // Special case: a unit-like struct's constructor must be called without () at the
            // end (like `UnitStruct`), which means this is an ExprPath to a DefFn. But in the
            // case of unit structs this should not be interpreted as a function pointer but as
            // a call to the constructor.
def::DefFn(_, true) => ExprKind::RvalueDps,
// Fn pointers are just scalar values.
def::DefFn(..) | def::DefMethod(..) => ExprKind::RvalueDatum,
            // Note: there is actually a good case to be made that
            // DefArg's, particularly those of immediate type, ought to be
            // considered rvalues.
def::DefStatic(..) |
def::DefUpvar(..) |
def::DefLocal(..) => ExprKind::Lvalue,
def::DefConst(..) |
def::DefAssociatedConst(..) => ExprKind::RvalueDatum,
def => {
tcx.sess.span_bug(
expr.span,
&format!("uncategorized def for expr {}: {:?}",
expr.id,
def));
}
}
}
ast::ExprUnary(ast::UnDeref, _) |
ast::ExprField(..) |
ast::ExprTupField(..) |
ast::ExprIndex(..) => {
ExprKind::Lvalue
}
ast::ExprCall(..) |
ast::ExprMethodCall(..) |
ast::ExprStruct(..) |
ast::ExprRange(..) |
ast::ExprTup(..) |
ast::ExprIf(..) |
ast::ExprMatch(..) |
ast::ExprClosure(..) |
ast::ExprBlock(..) |
ast::ExprRepeat(..) |
ast::ExprVec(..) => {
ExprKind::RvalueDps
}
ast::ExprIfLet(..) => {
tcx.sess.span_bug(expr.span, "non-desugared ExprIfLet");
}
ast::ExprWhileLet(..) => {
tcx.sess.span_bug(expr.span, "non-desugared ExprWhileLet");
}
ast::ExprForLoop(..) => {
tcx.sess.span_bug(expr.span, "non-desugared ExprForLoop");
}
ast::ExprLit(ref lit) if ast_util::lit_is_str(&**lit) => {
ExprKind::RvalueDps
}
ast::ExprBreak(..) |
ast::ExprAgain(..) |
ast::ExprRet(..) |
ast::ExprWhile(..) |
ast::ExprLoop(..) |
ast::ExprAssign(..) |
ast::ExprInlineAsm(..) |
ast::ExprAssignOp(..) => {
ExprKind::RvalueStmt
}
ast::ExprLit(_) | // Note: LitStr is carved out above
ast::ExprUnary(..) |
ast::ExprBox(None, _) |
ast::ExprAddrOf(..) |
ast::ExprBinary(..) |
ast::ExprCast(..) => {
ExprKind::RvalueDatum
}
ast::ExprBox(Some(ref place), _) => {
// Special case `Box<T>` for now:
let def_id = match tcx.def_map.borrow().get(&place.id) {
Some(def) => def.def_id(),
None => panic!("no def for place"),
};
if tcx.lang_items.exchange_heap() == Some(def_id) {
ExprKind::RvalueDatum
} else {
ExprKind::RvalueDps
}
}
ast::ExprParen(ref e) => expr_kind(tcx, &**e),
ast::ExprMac(..) => {
tcx.sess.span_bug(
expr.span,
"macro expression remains after expansion");
}
}
}
|
trans_imm_cast
|
baseline_blog.py
|
from django import template
from baselinecore.models import BlogPage
register = template.Library()
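# Simple tag that injects the five most recent live blog posts into the template context.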
@register.simple_tag(takes_context=True)
def
|
(context):
context.update(
{'latest_blog_posts': BlogPage.objects.live().order_by('-datetime')[:5]}
)
return ""
|
get_latest_posts
|
ContentModelsDataList.tsx
|
import React, { useCallback, useRef } from "react";
import TimeAgo from "timeago-react";
import { useRouter } from "@webiny/react-router";
import { css } from "emotion";
import get from "lodash/get";
import { ConfirmationDialog } from "@webiny/ui/ConfirmationDialog";
import { DeleteIcon, EditIcon } from "@webiny/ui/List/DataList/icons";
import { ReactComponent as ViewListIcon } from "@webiny/app-headless-cms/admin/icons/view_list.svg";
import { DELETE_CONTENT_MODEL } from "../../viewsGraphql";
import { useApolloClient } from "@webiny/app-headless-cms/admin/hooks";
import { useSnackbar } from "@webiny/app-admin/hooks/useSnackbar";
import CurrentEnvironmentLabel from "./../../components/CurrentEnvironmentLabel";
import {
DataList,
List,
ListItem,
ListItemText,
ListItemTextSecondary,
ListItemMeta,
ListActions
} from "@webiny/ui/List";
import { IconButton } from "@webiny/ui/Button";
import { Tooltip } from "@webiny/ui/Tooltip";
import { i18n } from "@webiny/app/i18n";
const t = i18n.namespace("FormsApp.ContentModelsDataList");
const rightAlign = css({
alignItems: "flex-end !important",
justifyContent: "center !important"
});
const listItemMinHeight = css({
minHeight: "66px !important"
});
const viewEntriesIconStyles = css({
color: "var(--mdc-theme-text-secondary-on-background)"
});
export type ContentModelsDataListProps = {
id?: any;
dataList: any;
};
const ContentModelsDataList = (props: ContentModelsDataListProps) => {
const { dataList } = props;
const { location, history } = useRouter();
const client = useApolloClient();
const { showSnackbar } = useSnackbar();
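    // Delete a content model, then refetch the model lists so menus and lists stay in sync.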
const deleteRecord = async item => {
const res = await client.mutate({
mutation: DELETE_CONTENT_MODEL,
variables: { id: item.id },
awaitRefetchQueries: true,
refetchQueries: [
"HeadlessCmsListContentModels",
"HeadlessCmsListMenuContentGroupsModels"
]
});
        const { data, error } = get(res, "data.deleteContentModel") || {};
if (data) {
showSnackbar(t`Content model {name} deleted.`({ name: item.name }));
} else {
showSnackbar(error.message, {
title: t`Something unexpected happened.`
});
}
if (item.id === props.id) {
const query = new URLSearchParams(location.search);
query.delete("id");
|
dataList.refresh();
};
const editHandlers = useRef({});
const editRecord = useCallback(contentModel => {
if (!editHandlers.current[contentModel.id]) {
editHandlers.current[contentModel.id] = async () => {
history.push("/cms/content-models/" + contentModel.id);
};
}
return editHandlers.current[contentModel.id];
}, undefined);
const viewContentEntries = useCallback(contentModel => {
return () => history.push("/cms/content-models/manage/" + contentModel.modelId);
}, undefined);
return (
<DataList
{...dataList}
title={t`Content Models`}
actions={<CurrentEnvironmentLabel style={{ justifyContent: "flex-end" }} />}
sorters={[
{
label: t`Newest to oldest`,
sorters: { createdOn: -1 }
},
{
label: t`Oldest to newest`,
sorters: { createdOn: 1 }
},
{
label: t`Title A-Z`,
sorters: { name: 1 }
},
{
label: t`Title Z-A`,
sorters: { name: -1 }
}
]}
>
{({ data = [] }) => (
<List data-testid="default-data-list">
{data.map(contentModel => (
<ListItem key={contentModel.id} className={listItemMinHeight}>
<ListItemText>
{contentModel.name}
{contentModel.createdBy && (
<ListItemTextSecondary>
{contentModel.createdBy.firstName && (
<>
{t`Created by: {user}.`({
user: contentModel.createdBy.firstName
})}{" "}
</>
)}
{t`Last modified: {time}.`({
time: <TimeAgo datetime={contentModel.savedOn} />
})}
</ListItemTextSecondary>
)}
</ListItemText>
<ListItemMeta className={rightAlign}>
<ListActions>
<Tooltip content={t`View content`} placement={"top"}>
<IconButton
icon={
<ViewListIcon className={viewEntriesIconStyles} />
}
label={t`View entries`}
onClick={viewContentEntries(contentModel)}
/>
</Tooltip>
<Tooltip content={t`Edit content model`} placement={"top"}>
<EditIcon onClick={editRecord(contentModel)} />
</Tooltip>
<ConfirmationDialog>
{({ showConfirmation }) => (
<Tooltip
content={t`Delete content model`}
placement={"top"}
>
<DeleteIcon
onClick={() =>
showConfirmation(async () =>
deleteRecord(contentModel)
)
}
/>
</Tooltip>
)}
</ConfirmationDialog>
</ListActions>
</ListItemMeta>
</ListItem>
))}
</List>
)}
</DataList>
);
};
export default ContentModelsDataList;
|
history.push({ search: query.toString() });
}
|
bitslice.rs
|
// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: merge with `bitvec`
use std::mem;
pub type Word = usize;
/// `BitSlice` provides helper methods for treating a `[Word]`
/// as a bitvector.
pub trait BitSlice {
fn clear_bit(&mut self, idx: usize) -> bool;
fn set_bit(&mut self, idx: usize) -> bool;
fn get_bit(&self, idx: usize) -> bool;
}
impl BitSlice for [Word] {
    /// Clears bit at `idx` to 0; returns true iff this changed `self`.
fn clear_bit(&mut self, idx: usize) -> bool {
let words = self;
debug!("clear_bit: words={} idx={}",
bits_to_string(words, words.len() * mem::size_of::<Word>()), bit_str(idx));
let BitLookup { word, bit_in_word, bit_mask } = bit_lookup(idx);
debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, bit_mask);
let oldv = words[word];
let newv = oldv & !bit_mask;
words[word] = newv;
oldv != newv
}
    /// Sets bit at `idx` to 1; returns true iff this changed `self`.
fn set_bit(&mut self, idx: usize) -> bool {
let words = self;
debug!("set_bit: words={} idx={}",
bits_to_string(words, words.len() * mem::size_of::<Word>()), bit_str(idx));
let BitLookup { word, bit_in_word, bit_mask } = bit_lookup(idx);
debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, bit_mask);
let oldv = words[word];
let newv = oldv | bit_mask;
words[word] = newv;
oldv != newv
}
/// Extracts value of bit at `idx` in `self`.
fn get_bit(&self, idx: usize) -> bool {
let words = self;
let BitLookup { word, bit_mask, .. } = bit_lookup(idx);
(words[word] & bit_mask) != 0
}
}
struct BitLookup {
    /// Index of the word within the original `[Word]` slice that holds the bit.
word: usize,
/// Index of the particular bit within the word holding the bit.
bit_in_word: usize,
/// Word with single 1-bit set corresponding to where the bit is located.
bit_mask: Word,
}
#[inline]
fn bit_lookup(bit: usize) -> BitLookup {
let word_bits = mem::size_of::<Word>() * 8;
let word = bit / word_bits;
let bit_in_word = bit % word_bits;
let bit_mask = 1 << bit_in_word;
BitLookup { word: word, bit_in_word: bit_in_word, bit_mask: bit_mask }
}
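// Formats a bit index as "[bit:byte-mask]" for debug output.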
fn bit_str(bit: Word) -> String
|
pub fn bits_to_string(words: &[Word], bits: usize) -> String {
let mut result = String::new();
let mut sep = '[';
// Note: this is a little endian printout of bytes.
// i tracks how many bits we have printed so far.
let mut i = 0;
for &word in words.iter() {
let mut v = word;
loop { // for each byte in `v`:
let remain = bits - i;
// If less than a byte remains, then mask just that many bits.
let mask = if remain <= 8 { (1 << remain) - 1 } else { 0xFF };
assert!(mask <= 0xFF);
let byte = v & mask;
result.push(sep);
result.push_str(&format!("{:02x}", byte));
if remain <= 8 { break; }
v >>= 8;
i += 8;
sep = '-';
}
}
result.push(']');
return result
}
#[inline]
pub fn bitwise<Op:BitwiseOperator>(out_vec: &mut [usize],
in_vec: &[usize],
op: &Op) -> bool {
assert_eq!(out_vec.len(), in_vec.len());
let mut changed = false;
for (out_elt, in_elt) in out_vec.iter_mut().zip(in_vec) {
let old_val = *out_elt;
let new_val = op.join(old_val, *in_elt);
*out_elt = new_val;
changed |= old_val != new_val;
}
changed
}
pub trait BitwiseOperator {
/// Applies some bit-operation pointwise to each of the bits in the two inputs.
fn join(&self, pred1: usize, pred2: usize) -> usize;
}
pub struct Union;
impl BitwiseOperator for Union {
#[inline]
fn join(&self, a: usize, b: usize) -> usize { a | b }
}
pub struct Subtract;
impl BitwiseOperator for Subtract {
#[inline]
fn join(&self, a: usize, b: usize) -> usize { a & !b }
}
|
{
let byte = bit >> 3;
let lobits = 1 << (bit & 0b111);
format!("[{}:{}-{:02x}]", bit, byte, lobits)
}
|
index.ts
|
export { default, default as AppQuery } from "./AppQuery";
|
||
playlist_play.go
|
package api
import (
"sort"
"strconv"
"github.com/fingerpich/grafana-farsi/pkg/api/dtos"
"github.com/fingerpich/grafana-farsi/pkg/bus"
_ "github.com/fingerpich/grafana-farsi/pkg/log"
m "github.com/fingerpich/grafana-farsi/pkg/models"
"github.com/fingerpich/grafana-farsi/pkg/services/search"
)
func populateDashboardsById(dashboardByIds []int64, dashboardIdOrder map[int64]int) (dtos.PlaylistDashboardsSlice, error) {
result := make(dtos.PlaylistDashboardsSlice, 0)
if len(dashboardByIds) > 0 {
dashboardQuery := m.GetDashboardsQuery{DashboardIds: dashboardByIds}
if err := bus.Dispatch(&dashboardQuery); err != nil {
return result, err
}
for _, item := range dashboardQuery.Result {
result = append(result, dtos.PlaylistDashboard{
Id: item.Id,
Slug: item.Slug,
Title: item.Title,
Uri: "db/" + item.Slug,
Order: dashboardIdOrder[item.Id],
})
}
}
return result, nil
}
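// populateDashboardsByTag resolves tag-based playlist items through the search
// service, collecting up to 100 dashboards per tag.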
func populateDashboardsByTag(orgId int64, signedInUser *m.SignedInUser, dashboardByTag []string, dashboardTagOrder map[string]int) dtos.PlaylistDashboardsSlice {
result := make(dtos.PlaylistDashboardsSlice, 0)
if len(dashboardByTag) > 0 {
for _, tag := range dashboardByTag {
searchQuery := search.Query{
Title: "",
Tags: []string{tag},
|
SignedInUser: signedInUser,
Limit: 100,
IsStarred: false,
OrgId: orgId,
}
if err := bus.Dispatch(&searchQuery); err == nil {
for _, item := range searchQuery.Result {
result = append(result, dtos.PlaylistDashboard{
Id: item.Id,
Title: item.Title,
Uri: item.Uri,
Order: dashboardTagOrder[tag],
})
}
}
}
}
return result
}
func LoadPlaylistDashboards(orgId int64, signedInUser *m.SignedInUser, playlistId int64) (dtos.PlaylistDashboardsSlice, error) {
playlistItems, _ := LoadPlaylistItems(playlistId)
dashboardByIds := make([]int64, 0)
dashboardByTag := make([]string, 0)
dashboardIdOrder := make(map[int64]int)
dashboardTagOrder := make(map[string]int)
for _, i := range playlistItems {
if i.Type == "dashboard_by_id" {
dashboardId, _ := strconv.ParseInt(i.Value, 10, 64)
dashboardByIds = append(dashboardByIds, dashboardId)
dashboardIdOrder[dashboardId] = i.Order
}
if i.Type == "dashboard_by_tag" {
dashboardByTag = append(dashboardByTag, i.Value)
dashboardTagOrder[i.Value] = i.Order
}
}
result := make(dtos.PlaylistDashboardsSlice, 0)
var k, _ = populateDashboardsById(dashboardByIds, dashboardIdOrder)
result = append(result, k...)
result = append(result, populateDashboardsByTag(orgId, signedInUser, dashboardByTag, dashboardTagOrder)...)
sort.Sort(result)
return result, nil
}
| |
0005_metaattributes_image.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-04-04 05:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
import django.db.models.deletion
import filer.fields.image
class
|
(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.FILER_IMAGE_MODEL),
('meta', '0004_auto_20180322_1026'),
]
operations = [
migrations.AddField(
model_name='metaattributes',
name='image',
field=filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.FILER_IMAGE_MODEL),
),
]
|
Migration
|
app.py
|
import pickle
import traceback
import numpy as np
from flask import Flask, request
from config import MODELPATH, DEBUG
app = Flask(__name__)
model = pickle.load(open(MODELPATH, 'rb'))
@app.route("/predict", methods=["POST"])
def predict():
|
if __name__ == "__main__":
app.run(debug=DEBUG)
|
"""{"input": [5.8, 2.8, 5.1, 2.4]}"""
try:
content = request.json
sample = content["input"]
sample = np.array(sample).reshape(1, -1)
prediction = model.predict(sample).tolist()[0]
return {"prediction": prediction}
except Exception as e:
tb = traceback.format_exc()
return {"errorMessages": tb.replace("\n","")}
|
main.go
|
package main
import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/mitchellh/go-homedir"
"github.com/olekukonko/tablewriter"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/eluv-io/contracts/cmds/ethverify/abidiff"
"github.com/eluv-io/contracts/cmds/ethverify/gitutils"
)
const (
DefaultElvFolder = ".eluvio/ethverify"
)
var (
ostream log.Handler
glogger = new(log.GlogHandler)
config string
cmdRoot = &cobra.Command{
Use: "ethverify",
Short: "Management and verification of contracts",
PersistentPreRunE: readConfig,
}
cmdGitFind = &cobra.Command{
	Use: "git-find <contract_address> <Path/to/contracts/repo> <elvmasterd_rpc_url>",
Short: "Manage and retrieve the contract's git version",
Long: `git-find helps to retrieve the git version at which the contract bytecode is present.
The parameters can be set using flags or config file.`,
	Args: cobra.RangeArgs(2, 3),
RunE: runGitFind,
Example: `if running from contracts repo : ethverify git-find 0xCAFE . "http://localhost:8545"`,
}
cmdAbiDiff = &cobra.Command{
Use: "abi-diff",
Short: "To identify changes made to contracts",
	Long: `abi-diff compares the new abi with the stored abi and reports whether any critical changes are present.
If the changes are not critical, the store dir is updated with the new set of abi.
Also, the stored abi can be overwritten with new abi using --overwrite flag.
The default path for storedir is "./store", which can be changed using --storedir flag.
`,
RunE: runAbiDiff,
Example: `if run from 'contracts' repo, "ethverify abi-diff"
else "ethverify abi-diff --storedir <Path/to/stored/abi/dir>"
`,
}
)
func init() {
// cmd flags
cmdRoot.PersistentFlags().Int("verbosity", 3, "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail")
cmdRoot.PersistentFlags().String("log-file", "", "Output log file")
	cmdRoot.PersistentFlags().StringVar(&config, "config", "", "Config file path (default=<Homedir>/.eluvio/ethverify/config.toml)")
cmdAbiDiff.Flags().Bool("overwrite", false, "overwrite 'store' directory with new abi, even if abi-diff throws breaking changes")
cmdAbiDiff.Flags().String("storedir", "./store", "directory for stored abi files")
_ = viper.BindPFlag("verbosity", cmdRoot.PersistentFlags().Lookup("verbosity"))
_ = viper.BindPFlag("log_file", cmdRoot.PersistentFlags().Lookup("log-file"))
_ = viper.BindPFlag("git_find.ethurl", cmdGitFind.Flags().Lookup("ethurl"))
_ = viper.BindPFlag("git_find.rootdir", cmdGitFind.Flags().Lookup("rootdir"))
_ = viper.BindPFlag("git_find.contract", cmdGitFind.Flags().Lookup("contract"))
_ = viper.BindPFlag("abi_diff.overwrite", cmdAbiDiff.Flags().Lookup("overwrite"))
_ = viper.BindPFlag("abi_diff.storedir", cmdAbiDiff.Flags().Lookup("storedir"))
// for env variable
replacer := strings.NewReplacer(".", "_")
viper.SetEnvKeyReplacer(replacer)
viper.AutomaticEnv()
cmdRoot.AddCommand(cmdGitFind)
cmdRoot.AddCommand(cmdAbiDiff)
}
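// readConfig locates the config file (--config if given, otherwise
// <home>/.eluvio/ethverify/config.toml when present) and configures the root
// logger from the resolved verbosity and log_file settings.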
func readConfig(cmd *cobra.Command, args []string) error {
// if --config is passed, attempt to parse the config file
var filename string
if config == "" {
home, err := homedir.Dir()
var folderPath string
if err == nil {
folderPath = path.Join(home, DefaultElvFolder)
_, err := os.Stat(folderPath)
if os.IsNotExist(err) {
err = os.MkdirAll(folderPath, os.FileMode(0700))
if err != nil {
return fmt.Errorf("create config folder failed : folder = %v, err = %v", folderPath, err)
}
}
}
file := path.Join(folderPath, "config.toml")
// checks if config file exists
if _, err := os.Stat(file); !os.IsNotExist(err) {
log.Warn("reading from default config file", "filepath", file)
filename = filepath.Base(file)
viper.SetConfigName(filename[:len(filename)-len(filepath.Ext(filename))])
viper.AddConfigPath(folderPath)
err := viper.ReadInConfig()
if err != nil {
return fmt.Errorf("failed to read config file - %v", err)
}
}
} else {
filename = filepath.Base(config)
viper.SetConfigName(filename[:len(filename)-len(filepath.Ext(filename))])
viper.AddConfigPath(filepath.Dir(config))
err := viper.ReadInConfig()
if err != nil {
return fmt.Errorf("failed to read config file - %v", err)
}
}
var output io.Writer
usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
logFile := viper.GetString("log_file")
verbosity := viper.GetInt("verbosity")
var lvl log.Lvl
switch verbosity {
case 0:
lvl = log.LvlCrit
case 1:
lvl = log.LvlError
case 2:
lvl = log.LvlWarn
case 3:
lvl = log.LvlInfo
case 4:
lvl = log.LvlDebug
case 5:
lvl = log.LvlTrace
}
if usecolor {
output = colorable.NewColorableStderr()
}
var err error
if logFile != "" {
ostream, err = log.FileHandler(logFile, log.TerminalFormat(false))
if err != nil {
utils.Fatalf("error setting logger file", err)
}
} else {
output = io.Writer(os.Stdout)
ostream = log.StreamHandler(output, log.TerminalFormat(usecolor))
}
glogger = log.NewGlogHandler(ostream)
glogger.Verbosity(lvl)
log.Root().SetHandler(glogger)
return nil
}
func runGitFind(cmd *cobra.Command, args []string) error {
contractAddr := args[0]
if !common.IsHexAddress(contractAddr) || contractAddr == "" {
return fmt.Errorf("contract address provided is invalid, contract addr = %v", contractAddr)
}
rootDir := args[1]
if rootDir == "" {
return fmt.Errorf("root directory is nil")
}
var ethurl string
if len(args) > 2 {
ethurl = args[2]
} else {
ethurl = "http://localhost:8545"
		log.Warn(fmt.Sprintf("elvmasterd_rpc_url: %v", ethurl))
}
gitCommits, err := gitutils.GetContractGitCommit(rootDir, ethurl, common.HexToAddress(contractAddr))
if err != nil {
return err
|
if len(gitCommits) > 0 {
var table *tablewriter.Table
table = tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Git Contract Info"})
table.SetRowLine(true)
for _, v := range gitCommits {
table.Append([]string{v})
}
table.Render()
	} else {
		log.Info("No git commits are present for given contract address", "contract address", contractAddr)
}
return nil
}
func runAbiDiff(cmd *cobra.Command, args []string) error {
overwrite := viper.GetBool("abi_diff.overwrite")
storeDir := viper.GetString("abi_diff.storedir")
diffItem, err := abidiff.VerifyAllABI(overwrite, storeDir)
if err != nil {
return err
}
if len(diffItem) == 0 {
log.Info("NO DIFFERENCES in abi")
return nil
}
checkForBreakingChanges := false
for _, v := range diffItem {
if v.Breaking {
checkForBreakingChanges = true
log.Info("BREAKING Changes", "report", v.Report)
} else {
log.Info("Changes", "report", v.Report)
}
}
// No Breaking changes, replace the stored abi
if !checkForBreakingChanges {
_, err = abidiff.VerifyAllABI(true, storeDir)
if err != nil {
return err
}
log.Info("No Breaking changes are made, replacing the STORED abi to NEW abi")
}
return nil
}
func Execute() error {
err := cmdRoot.Execute()
if err != nil {
log.Error(err.Error())
// seems post-run is not invoked in that case
// cleanupServer(nil, nil)
}
return err
}
func main() {
if err := Execute(); err != nil {
os.Exit(1)
}
log.Info("LEAVING eth-verify...")
}
|
}
|
query_registry.py
|
from ted_sws.rml_to_html.resources import get_sparql_query
class QueryRegistry:
@property
def TRIPLE_MAP(self):
return get_sparql_query(query_file_name="get_triple_maps.rq")
@property
def LOGICAL_SOURCE(self):
return get_sparql_query(query_file_name="get_logical_source.rq")
@property
def SUBJECT_MAP(self):
return get_sparql_query(query_file_name="get_subject_map.rq")
@property
def PREDICATE_OBJECT_MAP(self):
return get_sparql_query(query_file_name="get_predicate_object_map.rq") \
@property
|
def TRIPLE_MAP_COMMENT_LABEL(self):
return get_sparql_query(query_file_name="get_label_comment.rq")
|
|
DashboardMenuConfComponent.ts
|
import Component from 'vue-class-component';
import { Prop, Vue, Watch } from 'vue-property-decorator';
import ModuleDAO from '../../../../../shared/modules/DAO/ModuleDAO';
import InsertOrDeleteQueryResult from '../../../../../shared/modules/DAO/vos/InsertOrDeleteQueryResult';
import ModuleDashboardBuilder from '../../../../../shared/modules/DashboardBuilder/ModuleDashboardBuilder';
import DashboardVO from '../../../../../shared/modules/DashboardBuilder/vos/DashboardVO';
import MenuElementVO from '../../../../../shared/modules/Menu/vos/MenuElementVO';
import ModuleTranslation from '../../../../../shared/modules/Translation/ModuleTranslation';
import TranslatableTextVO from '../../../../../shared/modules/Translation/vos/TranslatableTextVO';
import TranslationVO from '../../../../../shared/modules/Translation/vos/TranslationVO';
import ConsoleHandler from '../../../../../shared/tools/ConsoleHandler';
import { ModuleTranslatableTextAction } from '../../InlineTranslatableText/TranslatableTextStore';
import MenuController from '../../menu/MenuController';
import MenuOrganizerComponent from '../../menu/organizer/MenuOrganizerComponent';
import VueComponentBase from '../../VueComponentBase';
import './DashboardMenuConfComponent.scss';
@Component({
template: require('./DashboardMenuConfComponent.pug'),
components: {
Menuorganizercomponent: MenuOrganizerComponent
}
})
export default class
|
extends VueComponentBase {
@Prop()
private dashboard: DashboardVO;
@ModuleTranslatableTextAction
private set_flat_locale_translation: (translation: { code_text: string, value: string }) => void;
private menu_app: { [app_name: string]: number } = {};
private app_names: string[] = [];
private is_loading: boolean = true;
@Watch('dashboard', { immediate: true })
private async onchange_dashboard() {
this.is_loading = true;
if (!this.dashboard) {
this.menu_app = {};
this.is_loading = false;
return;
}
this.menu_app = {};
this.app_names = Object.keys(MenuController.getInstance().menus_by_app_names);
for (let i in this.app_names) {
let app_name = this.app_names[i];
let db_menu: MenuElementVO = await ModuleDAO.getInstance().getNamedVoByName<MenuElementVO>(
MenuElementVO.API_TYPE_ID, 'dashboard__menu__' + app_name + '__' + this.dashboard.id);
if (db_menu) {
this.menu_app[db_menu.app_name] = db_menu.id;
}
}
this.is_loading = false;
}
private get_menu(app_name: string): MenuElementVO {
if (!this.dashboard) {
return null;
}
let res: MenuElementVO = new MenuElementVO();
res.access_policy_name = ModuleDashboardBuilder.POLICY_FO_ACCESS;
res.app_name = app_name;
res.fa_class = "fa-area-chart";
res.hidden = true;
res.menu_parent_id = null;
res.name = 'dashboard__menu__' + app_name + '__' + this.dashboard.id;
res.target = 'Dashboard View';
res.target_is_routename = true;
res.target_route_params = '{ "dashboard_id": ' + this.dashboard.id + ' }';
res.weight = -1;
return res;
}
private async switch_menu_app(app_name: string) {
this.is_loading = true;
if (this.dashboard) {
let db_menu: MenuElementVO = await ModuleDAO.getInstance().getNamedVoByName<MenuElementVO>(
MenuElementVO.API_TYPE_ID, 'dashboard__menu__' + app_name + '__' + this.dashboard.id);
if (!!this.menu_app[app_name]) {
if (!!db_menu) {
await ModuleDAO.getInstance().deleteVOs([db_menu]);
await MenuController.getInstance().reload_from_db();
this.app_names = Object.keys(MenuController.getInstance().menus_by_app_names);
}
Vue.set(this.menu_app, app_name, null);
} else {
if (!!db_menu) {
Vue.set(this.menu_app, app_name, db_menu.id);
this.is_loading = false;
return;
}
db_menu = this.get_menu(app_name);
let translatable_text_menu = await ModuleTranslation.getInstance().getTranslatableText(db_menu.translatable_title);
if (!translatable_text_menu) {
translatable_text_menu = new TranslatableTextVO();
translatable_text_menu.code_text = db_menu.translatable_title;
let insertOrDeleteQueryResulttt: InsertOrDeleteQueryResult = await ModuleDAO.getInstance().insertOrUpdateVO(translatable_text_menu);
if ((!insertOrDeleteQueryResulttt) || (!insertOrDeleteQueryResulttt.id)) {
ConsoleHandler.getInstance().error('Failed switch_menu_app create translatable text');
this.is_loading = false;
return;
}
translatable_text_menu.id = insertOrDeleteQueryResulttt.id;
}
            /**
             * Base this on the dashboard's current translation
             */
let db_translatable_text = await ModuleTranslation.getInstance().getTranslatableText(this.dashboard.translatable_name_code_text);
if (db_translatable_text) {
let translations: TranslationVO[] = await ModuleDAO.getInstance().getVosByRefFieldIds<TranslationVO>(
TranslationVO.API_TYPE_ID, 'text_id', [db_translatable_text.id]);
for (let i in translations) {
let translation = translations[i];
let menu_translation: TranslationVO = await ModuleTranslation.getInstance().getTranslation(translation.lang_id, translatable_text_menu.id);
if (!menu_translation) {
menu_translation = new TranslationVO();
menu_translation.lang_id = translation.lang_id;
menu_translation.text_id = translatable_text_menu.id;
menu_translation.translated = translation.translated;
let resi = await ModuleDAO.getInstance().insertOrUpdateVO(menu_translation);
if (resi && resi.id) {
this.set_flat_locale_translation({
code_text: translatable_text_menu.code_text,
value: translation.translated
});
}
}
}
}
let insertOrDeleteQueryResult: InsertOrDeleteQueryResult = await ModuleDAO.getInstance().insertOrUpdateVO(this.get_menu(app_name));
if ((!insertOrDeleteQueryResult) || !insertOrDeleteQueryResult.id) {
ConsoleHandler.getInstance().error('Failed switch_menu_app create');
this.is_loading = false;
return;
}
Vue.set(this.menu_app, app_name, insertOrDeleteQueryResult.id);
await MenuController.getInstance().reload_from_db();
this.app_names = Object.keys(MenuController.getInstance().menus_by_app_names);
}
}
this.is_loading = false;
}
}
|
DashboardMenuConfComponent
|
App_20210511150821.js
|
import React from 'react';
import API from './API';
import './App.css';
import Navigation from './components/Navigation';
import { BrowserRouter as Router, Switch, Route } from 'react-router-dom';
import Home from './pages/Home';
import Planets from './pages/Planets';
import Starships from './pages/Starships';
import Spinner from './components/Spinner';
import PersonDetails from './components/PersonDetails';
function App() {
const [people, setPeople] = React.useState([]);
const [planets, setPlanets] = React.useState([]);
const [starships, setStarships] = React.useState([]);
const [loading, setLoading] = React.useState(true);
const [userSearch, setUserSearch] = React.useState('');
  const [personId, setPersonId] = React.useState(null);
React.useEffect(() => {
getPeople();
getPlanets();
getStarships();
setLoading(false);
}, []);
const handleClick = (id) => {
setPersonId(id)
}
const updateUserSearch = (e) => {
setUserSearch(e.target.value);
};
const getPeople = () => {
API.fetchAllPeople()
.then((data) => setPeople(data.results))
.catch((error) => console.error(error));
};
const getPlanets = () => {
API.fetchAllPlanets()
|
.then((data) => setPlanets(data.results))
.catch((error) => console.error(error));
};
const getStarships = () => {
API.fetchAllStarships()
.then((data) => setStarships(data.results))
.catch((error) => console.error(error));
};
console.log('people', people);
console.log('planets', planets);
console.log('starships', starships);
return (
<>
<Router>
<Navigation
updateUserSearch={updateUserSearch}
userSearch={userSearch}
/>
{loading ? (
<Spinner />
) : (
<Switch>
<Route exact path="/">
<Home people={people} userSearch={userSearch} personId={personId} handleClick={handleClick}/>
</Route>
<Route exact path="/planets">
<Planets planets={planets} />
</Route>
<Route exact path="/starships">
<Starships starships={starships} />
</Route>
            <Route path={`/people/${personId}/`}>
              <PersonDetails people={people} personId={personId}/>
</Route>
</Switch>
)}
</Router>
</>
);
}
export default App;
| |
piped_deconvolve.py
|
'''Implements a multiprocessing deconvolution algorithm
'''
import os
import multiprocessing
from collections import deque
import ms_peak_picker
import ms_deisotope
import traceback
from ms_deisotope.processor import (
ScanProcessor, MSFileLoader,
NoIsotopicClustersError, EmptyScanError)
from ms_deisotope.feature_map.quick_index import index as build_scan_index
from ms_deisotope.data_source.common import ProcessedScan
import logging
from glycan_profiling.task import (
TaskBase,
log_handle,
CallInterval)
from glycan_profiling.config import get_configuration
from multiprocessing import Process, JoinableQueue
try:
from Queue import Empty as QueueEmpty
except ImportError:
from queue import Empty as QueueEmpty
logger = logging.getLogger("glycan_profiler.preprocessor")
DONE = b"--NO-MORE--"
SCAN_STATUS_GOOD = b"good"
SCAN_STATUS_SKIP = b"skip"
user_config = get_configuration()
huge_tree = user_config.get("xml_huge_tree", False)
savgol = ms_peak_picker.scan_filter.SavitskyGolayFilter()
denoise = ms_peak_picker.scan_filter.FTICRBaselineRemoval(window_length=2.)
class ScanIDYieldingProcess(Process):
def __init__(self, ms_file_path, queue, start_scan=None, max_scans=None, end_scan=None,
no_more_event=None, ignore_tandem_scans=False, batch_size=1):
Process.__init__(self)
self.daemon = True
self.ms_file_path = ms_file_path
self.queue = queue
self.loader = None
self.start_scan = start_scan
self.max_scans = max_scans
self.end_scan = end_scan
self.ignore_tandem_scans = ignore_tandem_scans
self.batch_size = batch_size
self.no_more_event = no_more_event
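    # Pull up to batch_size (precursor, products) bunches from the loader,
    # tagging each with whether its product scans should be processed.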
def _make_scan_batch(self):
batch = []
scan_ids = []
for _i in range(self.batch_size):
try:
bunch = next(self.loader)
scan, products = bunch
if scan is not None:
scan_id = scan.id
else:
scan_id = None
product_scan_ids = [p.id for p in products]
except StopIteration:
break
except Exception as e:
log_handle.error("An error occurred in _make_scan_batch", e)
break
if not self.ignore_tandem_scans:
batch.append((scan_id, product_scan_ids, True))
else:
batch.append((scan_id, product_scan_ids, False))
scan_ids.append(scan_id)
return batch, scan_ids
def run(self):
self.loader = MSFileLoader(
self.ms_file_path, huge_tree=huge_tree, decode_binary=False)
if self.start_scan is not None:
try:
self.loader.start_from_scan(
self.start_scan, require_ms1=self.loader.has_ms1_scans(), grouped=True)
except IndexError as e:
log_handle.error("An error occurred while locating start scan", e)
self.loader.reset()
self.loader.make_iterator(grouped=True)
            except AttributeError as e:
                log_handle.error("The reader does not support random access, start time will be ignored", e)
self.loader.reset()
self.loader.make_iterator(grouped=True)
else:
self.loader.make_iterator(grouped=True)
count = 0
last = 0
if self.max_scans is None:
max_scans = float('inf')
else:
max_scans = self.max_scans
end_scan = self.end_scan
while count < max_scans:
try:
batch, ids = self._make_scan_batch()
if len(batch) > 0:
self.queue.put(batch)
count += len(ids)
if (count - last) > 1000:
last = count
self.queue.join()
                if (end_scan is not None and end_scan in ids) or len(ids) == 0:
log_handle.log("End Scan Found")
break
except StopIteration:
break
except Exception as e:
log_handle.error("An error occurred while fetching scans", e)
break
if self.no_more_event is not None:
self.no_more_event.set()
log_handle.log("All Scan IDs have been dealt. %d scan bunches." % (count,))
else:
self.queue.put(DONE)
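# Re-materializes full Scan objects from queued (precursor_id, product_scan_ids) pairs.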
class ScanBunchLoader(object):
def __init__(self, mzml_loader):
self.loader = mzml_loader
self.queue = deque()
def put(self, scan_id, product_scan_ids):
self.queue.append((scan_id, product_scan_ids))
def get(self):
scan_id, product_scan_ids = self.queue.popleft()
if scan_id is not None:
precursor = self.loader.get_scan_by_id(scan_id)
else:
precursor = None
products = [self.loader.get_scan_by_id(
pid) for pid in product_scan_ids if pid is not None]
if precursor:
precursor.product_scans = products
return (precursor, products)
class ScanTransformMixin(object):
def log_error(self, error, scan_id, scan, product_scan_ids):
tb = traceback.format_exc()
self.log_handler(
"An %r occurred for %s (index %r) in Process %r\n%s" % (
error, scan_id, scan.index, multiprocessing.current_process(),
tb))
def _init_batch_store(self):
self._batch_store = deque()
def get_work(self, block=True, timeout=30):
if self._batch_store:
return self._batch_store.popleft()
else:
batch = self.input_queue.get(block, timeout)
self._batch_store.extend(batch)
result = self._batch_store.popleft()
return result
def
|
(self, message):
self.log_handler(message + ", %r" %
(multiprocessing.current_process()))
def skip_entry(self, index, ms_level):
self.output_queue.put((SCAN_STATUS_SKIP, index, ms_level))
def skip_scan(self, scan):
self.output_queue.put((SCAN_STATUS_SKIP, scan.index, scan.ms_level))
def send_scan(self, scan):
scan = scan.pack()
# this attribute is not needed, and for MS1 scans is dangerous
# to pickle.
# It can pull other scans which may not yet have been packed
# into the message sent back to the main process which in
# turn can form a reference cycle and eat a lot of memory
scan.product_scans = []
self.output_queue.put((scan, scan.index, scan.ms_level))
def all_work_done(self):
return self._work_complete.is_set()
def make_scan_transformer(self, loader=None):
raise NotImplementedError()
class ScanTransformingProcess(Process, ScanTransformMixin):
"""ScanTransformingProcess describes a child process that consumes scan id bunches
from a shared input queue, retrieves the relevant scans, and preprocesses them using an
instance of :class:`ms_deisotope.processor.ScanProcessor`, sending the reduced result
to a shared output queue.
Attributes
----------
input_queue : multiprocessing.JoinableQueue
A shared input queue which contains payloads of bunches of
scan ids
ms1_deconvolution_args : dict
Parameters passed to :class:`ms_deisotope.processor.ScanProcessor`
ms1_peak_picking_args : dict
Parameters passed to :class:`ms_deisotope.processor.ScanProcessor`
msn_deconvolution_args : dict
Parameters passed to :class:`ms_deisotope.processor.ScanProcessor`
msn_peak_picking_args : dict
Parameters passed to :class:`ms_deisotope.processor.ScanProcessor`
mzml_path : str
Path to the spectral data file on disk
no_more_event : multiprocessing.Event
An event which will be set when the process feeding the input
queue has run out of items to add, indicating that any QueueEmptyException
should be treated as a signal to finish rather than to wait for
new input
output_queue : multiprocessing.JoinableQueue
A shared output queue which this object will put
:class:`ms_deisotope.data_source.common.ProcessedScan` bunches onto.
"""
def __init__(self, mzml_path, input_queue, output_queue,
no_more_event=None, ms1_peak_picking_args=None,
msn_peak_picking_args=None,
ms1_deconvolution_args=None, msn_deconvolution_args=None,
envelope_selector=None, ms1_averaging=0, log_handler=None,
deconvolute=True, verbose=False):
if log_handler is None:
def print_message(msg):
print(msg)
log_handler = print_message
if ms1_peak_picking_args is None:
ms1_peak_picking_args = {
"transforms": [denoise, savgol],
"start_mz": 250
}
if msn_peak_picking_args is None:
msn_peak_picking_args = {
"transforms": []
}
if ms1_deconvolution_args is None:
ms1_deconvolution_args = {
"scorer": ms_deisotope.scoring.PenalizedMSDeconVFitter(35., 2),
"charge_range": (1, 8),
"averagine": ms_deisotope.glycopeptide
}
if msn_deconvolution_args is None:
msn_deconvolution_args = {
"scorer": ms_deisotope.scoring.MSDeconVFitter(10.),
"charge_range": (1, 8),
"averagine": ms_deisotope.glycopeptide
}
Process.__init__(self)
self.verbose = verbose
self._init_batch_store()
self.daemon = True
self.mzml_path = mzml_path
self.input_queue = input_queue
self.output_queue = output_queue
self.ms1_peak_picking_args = ms1_peak_picking_args
self.msn_peak_picking_args = msn_peak_picking_args
self.ms1_deconvolution_args = ms1_deconvolution_args
self.msn_deconvolution_args = msn_deconvolution_args
self.envelope_selector = envelope_selector
self.ms1_averaging = ms1_averaging
self.deconvolute = deconvolute
self.transformer = None
self.no_more_event = no_more_event
self._work_complete = multiprocessing.Event()
self.log_handler = log_handler
def make_scan_transformer(self, loader=None):
transformer = ScanProcessor(
loader,
ms1_peak_picking_args=self.ms1_peak_picking_args,
msn_peak_picking_args=self.msn_peak_picking_args,
ms1_deconvolution_args=self.ms1_deconvolution_args,
msn_deconvolution_args=self.msn_deconvolution_args,
loader_type=lambda x: x,
envelope_selector=self.envelope_selector,
ms1_averaging=self.ms1_averaging)
return transformer
def handle_scan_bunch(self, scan, product_scans, scan_id, product_scan_ids, process_msn=True):
transformer = self.transformer
# handle the MS1 scan if it is present
if scan is not None:
if len(scan.arrays[0]) == 0:
self.skip_scan(scan)
else:
try:
scan, priorities, product_scans = transformer.process_scan_group(
scan, product_scans)
if scan is None:
# no way to report skip
pass
else:
if self.verbose:
self.log_message("Handling Precursor Scan %r with %d peaks" % (scan.id, len(scan.peak_set)))
if self.deconvolute:
transformer.deconvolute_precursor_scan(scan, priorities, product_scans)
self.send_scan(scan)
except NoIsotopicClustersError as e:
self.log_message("No isotopic clusters were extracted from scan %s (%r)" % (
e.scan_id, len(scan.peak_set)))
self.skip_scan(scan)
except EmptyScanError as e:
self.skip_scan(scan)
except Exception as e:
self.skip_scan(scan)
self.log_error(e, scan_id, scan, (product_scan_ids))
for product_scan in product_scans:
# no way to report skip
if product_scan is None:
continue
if len(product_scan.arrays[0]) == 0 or (not process_msn):
self.skip_scan(product_scan)
continue
try:
transformer.pick_product_scan_peaks(product_scan)
if self.verbose:
self.log_message("Handling Product Scan %r with %d peaks (%0.3f/%0.3f, %r)" % (
product_scan.id, len(product_scan.peak_set), product_scan.precursor_information.mz,
product_scan.precursor_information.extracted_mz,
product_scan.precursor_information.defaulted))
if self.deconvolute:
transformer.deconvolute_product_scan(product_scan)
if scan is None:
product_scan.precursor_information.default(orphan=True)
self.send_scan(product_scan)
except NoIsotopicClustersError as e:
self.log_message("No isotopic clusters were extracted from scan %s (%r)" % (
e.scan_id, len(product_scan.peak_set)))
self.skip_scan(product_scan)
except EmptyScanError as e:
self.skip_scan(product_scan)
except Exception as e:
self.skip_scan(product_scan)
self.log_error(e, product_scan.id,
product_scan, (product_scan_ids))
def run(self):
loader = MSFileLoader(
self.mzml_path, huge_tree=huge_tree, decode_binary=False)
queued_loader = ScanBunchLoader(loader)
has_input = True
transformer = self.make_scan_transformer(loader)
self.transformer = transformer
nologs = ["deconvolution_scan_processor"]
if not self.deconvolute:
nologs.append("deconvolution")
debug_mode = os.getenv("GLYCRESOFTDEBUG")
if debug_mode:
handler = logging.FileHandler("piped-deconvolution-debug-%s.log" % (os.getpid()), 'w')
fmt = logging.Formatter(
"%(asctime)s - %(name)s:%(filename)s:%(lineno)-4d - %(levelname)s - %(message)s",
"%H:%M:%S")
handler.setFormatter(fmt)
for logname in nologs:
logger_to_silence = logging.getLogger(logname)
if debug_mode:
logger_to_silence.setLevel("DEBUG")
logger_to_silence.addHandler(handler)
else:
logger_to_silence.propagate = False
logger_to_silence.setLevel("CRITICAL")
logger_to_silence.addHandler(logging.NullHandler())
i = 0
last = 0
while has_input:
try:
scan_id, product_scan_ids, process_msn = self.get_work(True, 10)
self.input_queue.task_done()
except QueueEmpty:
if self.no_more_event is not None and self.no_more_event.is_set():
has_input = False
continue
i += 1 + len(product_scan_ids)
if scan_id == DONE:
has_input = False
break
try:
queued_loader.put(scan_id, product_scan_ids)
scan, product_scans = queued_loader.get()
except Exception as e:
self.log_message("Something went wrong when loading bunch (%s): %r.\nRecovery is not possible." % (
(scan_id, product_scan_ids), e))
self.handle_scan_bunch(scan, product_scans, scan_id, product_scan_ids, process_msn)
if (i - last) > 1000:
last = i
self.output_queue.join()
self.log_message("Done (%d scans)" % i)
if self.no_more_event is None:
self.output_queue.put((DONE, DONE, DONE))
self._work_complete.set()
class ScanCollator(TaskBase):
"""Collates incoming scan bunches from multiple
ScanTransformingProcesses, passing them along in
the correct order.
Attributes
----------
count_jobs_done : int
The number of scan bunches taken from :attr:`queue`
count_since_last : int
The number of work-cycles since the last scan bunch
has been yielded
done_event : multiprocessing.Event
An IPC Event to indicate that all scan ids have been
sent to the worker processes
helper_producers : list
A list of ScanTransformingProcesses
include_fitted : bool
Whether or not to save the raw fitted peaks for each
scan produced. When this is `False`, they will be
discarded and memory will be saved
last_index : int
The index of the last scan yielded through the iterator
loop. This controls the next scan to be yielded and any
waiting conditions
primary_worker : ScanTransformingProcess
The first worker to start consuming scans which will dictate
the first handled index. Is required to run in isolation
from other worker processes to ensure that the first scan
arrives in order
queue : multiprocessing.Queue
The IPC queue that all workers place their results on
to be consumed and yielded in order
started_helpers : bool
Whether or not the additional workers in :attr:`helper_producers`
have been started or not
waiting : dict
A mapping from scan index to `Scan` object. Used to serve
scans through the iterator when their index is called for
"""
_log_received_scans = False
def __init__(self, queue, done_event, helper_producers=None, primary_worker=None,
include_fitted=False, input_queue=None):
if helper_producers is None:
helper_producers = []
self.queue = queue
self.last_index = None
self.count_jobs_done = 0
self.count_since_last = 0
self.waiting = {}
self.done_event = done_event
self.helper_producers = helper_producers
self.started_helpers = False
self.primary_worker = primary_worker
self.include_fitted = include_fitted
self.input_queue = input_queue
def all_workers_done(self):
if self.done_event.is_set():
if self.primary_worker.all_work_done():
for helper in self.helper_producers:
if not helper.all_work_done():
return False
return True
else:
return False
return False
def store_item(self, item, index):
"""Stores an incoming work-item for easy
access by its `index` value. If configuration
requires it, this will also reduce the number
of peaks in `item`.
Parameters
----------
item : str or ProcessedScan
Either a status stub explaining why this work item
was skipped, or the processed scan itself
index : int
Scan index to store
"""
if self._log_received_scans:
self.log("-- received %d: %s" % (index, item))
self.waiting[index] = item
if not self.include_fitted and isinstance(item, ProcessedScan):
item.peak_set = []
def consume(self, timeout=10):
"""Fetches the next work item from the input
queue :attr:`queue`, blocking for at most `timeout` seconds.
Parameters
----------
timeout : int, optional
The duration to allow the process to block
for while awaiting new work items.
Returns
-------
bool
Whether or not a new work item was found waiting
on the :attr:`queue`
"""
blocking = timeout != 0
try:
item, index, _ms_level = self.queue.get(blocking, timeout)
self.queue.task_done()
# DONE message may be sent many times.
while item == DONE:
item, index, _ms_level = self.queue.get(blocking, timeout)
self.queue.task_done()
self.store_item(item, index)
return True
except QueueEmpty:
return False
def start_helper_producers(self):
"""Starts the additional :class:`ScanTransformingProcess` workers
in :attr:`helper_producers` if they have not been started already.
Should only be invoked once
"""
if self.started_helpers:
return
self.started_helpers = True
for helper in self.helper_producers:
if helper.is_alive():
continue
helper.start()
def produce(self, scan):
"""Performs any final quality controls on the outgoing
:class:`ProcessedScan` object and takes care of any internal
details.
Resets :attr:`count_since_last` to `0`.
Parameters
----------
scan : ProcessedScan
The scan object being finalized for hand-off
to client code
Returns
-------
ProcessedScan
The version of `scan` ready to be used by other
parts of the program
"""
self.count_since_last = 0
return scan
def count_pending_items(self):
return len(self.waiting)
def drain_queue(self):
i = 0
has_next = self.last_index + 1 not in self.waiting
while (self.count_pending_items() < (1000 if has_next else 10)
and self.consume(.1)):
self.count_jobs_done += 1
has_next = self.last_index + 1 not in self.waiting
i += 1
if i > 15:
self.log("Drained Output Queue of %d Items" % (i, ))
return i
def print_state(self):
try:
if self.queue.qsize() > 0:
self.log("%d since last work item" % (self.count_since_last,))
keys = sorted(self.waiting.keys())
if len(keys) > 5:
self.log("Waiting Keys: %r..." % (keys[:5],))
else:
self.log("Waiting Keys: %r" % (keys,))
self.log("%d Keys Total" % (len(self.waiting),))
self.log("The last index handled: %r" % (self.last_index,))
self.log("Number of items waiting in the queue: %d" %
(self.queue.qsize(),))
except NotImplementedError:
# Some platforms do not support qsize
pass
for worker in ([self.primary_worker] + list(self.helper_producers)):
code = worker.exitcode
if code is not None and code != 0:
self.log("%r has exit code %r" % (worker, code))
worker.join(5)
def __iter__(self):
has_more = True
# Log the state of the collator every 3 minutes
status_monitor = CallInterval(60 * 3, self.print_state)
status_monitor.start()
while has_more:
if self.consume(1):
self.count_jobs_done += 1
try:
if self.queue.qsize() > 500:
self.drain_queue()
except NotImplementedError:
# Some platforms do not support qsize. On these, always drain the queue.
self.drain_queue()
if self.last_index is None:
keys = sorted(self.waiting)
if keys:
i = 0
n = len(keys)
found_content = False
while i < n:
scan = self.waiting.pop(keys[i])
if scan == SCAN_STATUS_SKIP:
self.last_index = keys[i]
i += 1
continue
else:
found_content = True
break
if found_content:
self.last_index = scan.index
yield self.produce(scan)
if self.last_index is not None:
self.start_helper_producers()
elif self.last_index + 1 in self.waiting:
while self.last_index + 1 in self.waiting:
scan = self.waiting.pop(self.last_index + 1)
if scan == SCAN_STATUS_SKIP:
self.last_index += 1
continue
else:
self.last_index = scan.index
yield self.produce(scan)
elif len(self.waiting) == 0:
if self.all_workers_done():
self.log("All Workers Claim Done.")
has_something = self.consume()
self.log("Checked Queue For Work: %r" % has_something)
if not has_something and len(self.waiting) == 0 and self.queue.empty():
has_more = False
else:
self.count_since_last += 1
if self.count_since_last % 1000 == 0:
self.print_state()
status_monitor.stop()
class ScanGeneratorBase(object):
def configure_iteration(self, start_scan=None, end_scan=None, max_scans=None):
raise NotImplementedError()
def make_iterator(self, start_scan=None, end_scan=None, max_scans=None):
raise NotImplementedError()
def __iter__(self):
return self
def __next__(self):
if self._iterator is None: # pylint: disable=access-member-before-definition
self._iterator = self.make_iterator()
return next(self._iterator)
def next(self):
return self.__next__()
def close(self):
pass
@property
def scan_source(self):
return None
_deconvoluting = False
@property
def deconvoluting(self):
return self._deconvoluting
@deconvoluting.setter
def deconvoluting(self, value):
self._deconvoluting = value
_ms1_averaging = 0
@property
def ms1_averaging(self):
return self._ms1_averaging
@ms1_averaging.setter
def ms1_averaging(self, value):
self._ms1_averaging = value
_ignore_tandem_scans = False
@property
def ignore_tandem_scans(self):
return self._ignore_tandem_scans
@ignore_tandem_scans.setter
def ignore_tandem_scans(self, value):
self._ignore_tandem_scans = value
_extract_only_tandem_envelopes = False
@property
def extract_only_tandem_envelopes(self):
return self._extract_only_tandem_envelopes
@extract_only_tandem_envelopes.setter
def extract_only_tandem_envelopes(self, value):
self._extract_only_tandem_envelopes = value
class ScanGenerator(TaskBase, ScanGeneratorBase):
def __init__(self, ms_file, number_of_helpers=4,
ms1_peak_picking_args=None, msn_peak_picking_args=None,
ms1_deconvolution_args=None, msn_deconvolution_args=None,
extract_only_tandem_envelopes=False, ignore_tandem_scans=False,
ms1_averaging=0, deconvolute=True):
self.ms_file = ms_file
self.time_cache = {}
self.ignore_tandem_scans = ignore_tandem_scans
self.scan_ids_exhausted_event = multiprocessing.Event()
self._iterator = None
self._scan_yielder_process = None
self._deconv_process = None
self._input_queue = None
self._output_queue = None
self._deconv_helpers = None
self._order_manager = None
self.number_of_helpers = number_of_helpers
self.ms1_peak_picking_args = ms1_peak_picking_args
self.msn_peak_picking_args = msn_peak_picking_args
self.ms1_averaging = ms1_averaging
self.deconvoluting = deconvolute
self.ms1_deconvolution_args = ms1_deconvolution_args
self.msn_deconvolution_args = msn_deconvolution_args
self.extract_only_tandem_envelopes = extract_only_tandem_envelopes
self._scan_interval_tree = None
self.log_controller = self.ipc_logger()
@property
def scan_source(self):
return self.ms_file
def join(self):
if self._scan_yielder_process is not None:
self._scan_yielder_process.join()
if self._deconv_process is not None:
self._deconv_process.join()
if self._deconv_helpers is not None:
for helper in self._deconv_helpers:
helper.join()
def _terminate(self):
if self._scan_yielder_process is not None:
self._scan_yielder_process.terminate()
if self._deconv_process is not None:
self._deconv_process.terminate()
if self._deconv_helpers is not None:
for helper in self._deconv_helpers:
helper.terminate()
def _preindex_file(self):
reader = MSFileLoader(self.ms_file, use_index=False, huge_tree=huge_tree)
try:
reader.prebuild_byte_offset_file(self.ms_file)
except AttributeError:
# the type does not support this type of indexing
pass
except IOError:
# the file could not be written
pass
except Exception as e:
# something else went wrong
self.error("An error occurred while pre-indexing.", e)
def _make_interval_tree(self, start_scan, end_scan):
reader = MSFileLoader(self.ms_file, decode_binary=False)
if start_scan is not None:
start_ix = reader.get_scan_by_id(start_scan).index
else:
start_ix = 0
if end_scan is not None:
end_ix = reader.get_scan_by_id(end_scan).index
else:
end_ix = len(reader)
reader.reset()
_index, interval_tree = build_scan_index(
reader, self.number_of_helpers + 1, (start_ix, end_ix))
self._scan_interval_tree = interval_tree
self.log("RT Tree: %r" % (self._scan_interval_tree.rt_tree))
def _make_transforming_process(self):
return ScanTransformingProcess(
self.ms_file,
self._input_queue,
self._output_queue,
self.scan_ids_exhausted_event,
ms1_peak_picking_args=self.ms1_peak_picking_args,
msn_peak_picking_args=self.msn_peak_picking_args,
ms1_deconvolution_args=self.ms1_deconvolution_args,
msn_deconvolution_args=self.msn_deconvolution_args,
envelope_selector=self._scan_interval_tree,
log_handler=self.log_controller.sender(),
ms1_averaging=self.ms1_averaging,
deconvolute=self.deconvoluting)
def _make_collator(self):
return ScanCollator(
self._output_queue, self.scan_ids_exhausted_event, self._deconv_helpers,
self._deconv_process, input_queue=self._input_queue,
include_fitted=not self.deconvoluting)
def _initialize_workers(self, start_scan=None, end_scan=None, max_scans=None):
try:
self._input_queue = JoinableQueue(int(1e6))
self._output_queue = JoinableQueue(int(1e6))
except OSError:
# Not all platforms permit limiting the size of queues
self._input_queue = JoinableQueue()
self._output_queue = JoinableQueue()
self._preindex_file()
if self.extract_only_tandem_envelopes:
self.log("Constructing Scan Interval Tree")
self._make_interval_tree(start_scan, end_scan)
self._terminate()
self._scan_yielder_process = ScanIDYieldingProcess(
self.ms_file, self._input_queue, start_scan=start_scan, end_scan=end_scan,
max_scans=max_scans, no_more_event=self.scan_ids_exhausted_event,
ignore_tandem_scans=self.ignore_tandem_scans, batch_size=1)
self._scan_yielder_process.start()
self._deconv_process = self._make_transforming_process()
self._deconv_helpers = []
for _i in range(self.number_of_helpers):
self._deconv_helpers.append(self._make_transforming_process())
self._deconv_process.start()
self._order_manager = self._make_collator()
def make_iterator(self, start_scan=None, end_scan=None, max_scans=None):
self._initialize_workers(start_scan, end_scan, max_scans)
for scan in self._order_manager:
self.time_cache[scan.id] = scan.scan_time
yield scan
self.log_controller.stop()
self.join()
self._terminate()
def configure_iteration(self, start_scan=None, end_scan=None, max_scans=None):
self._iterator = self.make_iterator(start_scan, end_scan, max_scans)
def convert_scan_id_to_retention_time(self, scan_id):
return self.time_cache[scan_id]
def close(self):
self._terminate()
|
log_message
|
main.rs
|
// Silence some warnings so they don't distract from the exercise.
#![allow(dead_code, unused_mut, unused_variables)]
fn main() {
// This collects any command-line arguments into a vector of Strings.
// For example:
//
// cargo run apple banana
//
// ...produces the equivalent of
//
// vec!["apple".to_string(), "banana".to_string()]
let args: Vec<String> = std::env::args().skip(1).collect();
// This consumes the `args` vector to iterate through each String
for arg in args {
// 1a. Your task: handle the command-line arguments!
//
// - If arg is "sum", then call the sum() function
// - If arg is "double", then call the double() function
// - If arg is anything else, then call the count() function, passing "arg" to it.
if arg == "sum" {
sum()
} else if arg == "double" {
double()
|
// 1b. Now try passing "sum", "double" and "bananas" to the program by adding your argument
// after "cargo run". For example "cargo run sum"
}
}
fn sum() {
let mut sum = 0;
// 2. Use a "for loop" to iterate through integers from 7 to 23 *inclusive* using a range
// and add them all together (increment the `sum` variable). Hint: You should get 255
// Run it with `cargo run sum`
for num in 7..=23 {
    sum += num;
}
println!("The sum is {}", sum);
}
fn double() {
let mut count = 0;
let mut x = 1;
// 3. Use a "while loop" to count how many times you can double the value of `x` (multiply `x`
// by 2) until `x` is larger than 500. Increment `count` each time through the loop. Run it
// with `cargo run double` Hint: The answer is 9 times.
while x < 500 {
x *= 2;
count += 1;
}
println!("You can double x {} times until x is larger than 500", count);
}
fn count(arg: String) {
// Challenge: Use an unconditional loop (`loop`) to print `arg` 8 times, and then break.
// You will need to count your loops, somehow. Run it with `cargo run bananas`
//
// print!("{} ", arg); // Execute this line 8 times, and then break. `print!` doesn't add a newline.
let mut count: i32 = 0;
loop {
print!("{} ", arg);
count += 1;
if count == 8 {
break
}
}
println!(); // This will output just a newline at the end for cleanliness.
}
|
} else {
count("arg".to_string())
}
|
App.js
|
import React from 'react';
import GlobalStyle from './globalStyles';
import { BrowserRouter as Router, Switch, Route } from 'react-router-dom';
import Navbar from './components/Navbar/Navbar';
//Pages
|
import Home from './pages/Home';
import SignUp from './pages/SignupPage';
import Pricing from './pages/PricingPage';
import Footer from './components/Footer/Footer';
function App() {
return (
<Router>
<GlobalStyle />
<Navbar />
<Switch>
<Route path="/" exact component={Grid} />
<Route path="/signup" exact component={SignUp} />
<Route path="/pricing" exact component={Pricing} />
</Switch>
<Footer />
</Router>
);
}
export default App;
| |
density_matrix.py
|
# Copyright 2019 PIQuIL - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import torch
from torch.nn import functional as F
from qucumber import _warn_on_missing_gpu
from qucumber.utils import cplx, unitaries
from qucumber.rbm import PurificationRBM
from .neural_state import NeuralStateBase
class DensityMatrix(NeuralStateBase):
r"""
:param num_visible: The number of visible units, i.e. the size of the system
:type num_visible: int
:param num_hidden: The number of units in the hidden layer
:type num_hidden: int
:param num_aux: The number of units in the purification layer
:type num_aux: int
:param unitary_dict: A dictionary associating bases with their unitary rotations
:type unitary_dict: dict[str, torch.Tensor]
:param gpu: Whether to perform computations on the default gpu.
:type gpu: bool
"""
_rbm_am = None
_rbm_ph = None
_device = None
def __init__(
self,
num_visible,
num_hidden=None,
num_aux=None,
unitary_dict=None,
gpu=False,
module=None,
):
if gpu and torch.cuda.is_available():
warnings.warn(
"Using DensityMatrix on GPU is not recommended due to poor performance compared to CPU.",
ResourceWarning,
2,
)
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
if module is None:
self.rbm_am = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)
self.rbm_ph = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)
else:
_warn_on_missing_gpu(gpu)
self.rbm_am = module.to(self.device)
self.rbm_am.device = self.device
self.rbm_ph = module.to(self.device).clone()
self.rbm_ph.device = self.device
self.num_visible = self.rbm_am.num_visible
self.num_hidden = self.rbm_am.num_hidden
self.num_aux = self.rbm_am.num_aux
self.device = self.rbm_am.device
self.unitary_dict = unitary_dict if unitary_dict else unitaries.create_dict()
self.unitary_dict = {
k: v.to(device=self.device) for k, v in self.unitary_dict.items()
}
@property
def networks(self):
return ["rbm_am", "rbm_ph"]
@property
def rbm_am(self):
return self._rbm_am
@rbm_am.setter
def rbm_am(self, new_val):
self._rbm_am = new_val
@property
def rbm_ph(self):
"""RBM used to learn the wavefunction phase."""
return self._rbm_ph
@rbm_ph.setter
def rbm_ph(self, new_val):
self._rbm_ph = new_val
@property
def device(self):
return self._device
@device.setter
def device(self, new_val):
self._device = new_val
def pi(self, v, vp, expand=True):
r"""Calculates elements of the :math:`\Pi` matrix.
If `expand` is `True`, will return a complex matrix
:math:`A_{ij} = \langle\sigma_i|\Pi|\sigma'_j\rangle`.
Otherwise will return a complex vector
:math:`A_{i} = \langle\sigma_i|\Pi|\sigma'_i\rangle`.
:param v: A batch of visible states, :math:`\sigma`.
:type v: torch.Tensor
:param vp: The other batch of visible states, :math:`\sigma'`.
:type vp: torch.Tensor
:param expand: Whether to return a matrix (`True`) or a vector (`False`).
:type expand: bool
:returns: The matrix elements given by :math:`\langle\sigma|\Pi|\sigma'\rangle`
:rtype: torch.Tensor
"""
m_am = F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias)
mp_am = F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias)
m_ph = F.linear(v, self.rbm_ph.weights_U)
mp_ph = F.linear(vp, self.rbm_ph.weights_U)
if expand and v.dim() >= 2:
m_am = m_am.unsqueeze_(1)
m_ph = m_ph.unsqueeze_(1)
if expand and vp.dim() >= 2:
mp_am = mp_am.unsqueeze_(0)
mp_ph = mp_ph.unsqueeze_(0)
exp_arg = (m_am + mp_am) / 2
phase = (m_ph - mp_ph) / 2
real = (
(1 + 2 * exp_arg.exp() * phase.cos() + (2 * exp_arg).exp())
.sqrt()
.log()
.sum(-1)
)
imag = torch.atan2(
(exp_arg.exp() * phase.sin()), (1 + exp_arg.exp() * phase.cos())
).sum(-1)
return cplx.make_complex(real, imag)
def pi_grad(self, v, vp, phase=False, expand=False):
r"""Calculates the gradient of the :math:`\Pi` matrix with
respect to the amplitude (or phase) RBM parameters for two input states
:param v: One of the visible states, :math:`\sigma`
:type v: torch.Tensor
:param vp: The other visible state, :math:`\sigma'`
:type vp: torch.Tensor
:param phase: Whether to compute the gradients for the phase RBM (`True`)
or the amplitude RBM (`False`)
:type phase: bool
:param expand: Whether to expand the gradients over the full cross-product
    of the two batches (`True`) or compute them element-wise (`False`)
:type expand: bool
:returns: The matrix element of the gradient given by
:math:`\langle\sigma|\nabla_\lambda\Pi|\sigma'\rangle`
:rtype: torch.Tensor
"""
unsqueezed = v.dim() < 2 or vp.dim() < 2
v = (v.unsqueeze(0) if v.dim() < 2 else v).to(self.rbm_am.weights_W)
vp = (vp.unsqueeze(0) if vp.dim() < 2 else vp).to(self.rbm_am.weights_W)
if expand:
arg_real = 0.5 * (
F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(1)
+ F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(
0
)
)
arg_imag = 0.5 * (
F.linear(v, self.rbm_ph.weights_U).unsqueeze_(1)
- F.linear(vp, self.rbm_ph.weights_U).unsqueeze_(0)
)
else:
arg_real = self.rbm_am.mixing_term(v + vp)
arg_imag = self.rbm_ph.mixing_term(v - vp)
sig = cplx.sigmoid(arg_real, arg_imag)
batch_sizes = (
(v.shape[0], vp.shape[0], *v.shape[1:-1]) if expand else (*v.shape[:-1],)
)
W_grad = torch.zeros_like(self.rbm_am.weights_W).expand(*batch_sizes, -1, -1)
vb_grad = torch.zeros_like(self.rbm_am.visible_bias).expand(*batch_sizes, -1)
hb_grad = torch.zeros_like(self.rbm_am.hidden_bias).expand(*batch_sizes, -1)
if phase:
temp = (v.unsqueeze(1) - vp.unsqueeze(0)) if expand else (v - vp)
sig = cplx.scalar_mult(sig, cplx.I)
ab_grad_real = torch.zeros_like(self.rbm_ph.aux_bias).expand(
*batch_sizes, -1
)
ab_grad_imag = ab_grad_real.clone()
else:
temp = (v.unsqueeze(1) + vp.unsqueeze(0)) if expand else (v + vp)
ab_grad_real = cplx.real(sig)
ab_grad_imag = cplx.imag(sig)
U_grad = 0.5 * torch.einsum("c...j,...k->c...jk", sig, temp)
U_grad_real = cplx.real(U_grad)
U_grad_imag = cplx.imag(U_grad)
vec_real = [
W_grad.view(*batch_sizes, -1),
U_grad_real.view(*batch_sizes, -1),
vb_grad,
hb_grad,
ab_grad_real,
]
vec_imag = [
W_grad.view(*batch_sizes, -1).clone(),
U_grad_imag.view(*batch_sizes, -1),
vb_grad.clone(),
hb_grad.clone(),
ab_grad_imag,
]
if unsqueezed and not expand:
vec_real = [grad.squeeze_(0) for grad in vec_real]
vec_imag = [grad.squeeze_(0) for grad in vec_imag]
return cplx.make_complex(
torch.cat(vec_real, dim=-1), torch.cat(vec_imag, dim=-1)
)
def rho(self, v, vp=None, expand=True):
r"""Computes the matrix elements of the (unnormalized) density matrix.
If `expand` is `True`, will return a complex matrix
:math:`A_{ij} = \langle\sigma_i|\widetilde{\rho}|\sigma'_j\rangle`.
Otherwise will return a complex vector
:math:`A_{i} = \langle\sigma_i|\widetilde{\rho}|\sigma'_i\rangle`.
:param v: One of the visible states, :math:`\sigma`.
:type v: torch.Tensor
:param vp: The other visible state, :math:`\sigma'`.
If `None`, will be set to `v`.
:type vp: torch.Tensor
:param expand: Whether to return a matrix (`True`) or a vector (`False`).
:type expand: bool
:returns: The elements of the current density matrix
:math:`\langle\sigma|\widetilde{\rho}|\sigma'\rangle`
:rtype: torch.Tensor
"""
if expand is False and vp is None:
return cplx.make_complex(self.probability(v))
elif vp is None:
vp = v
pi_ = self.pi(v, vp, expand=expand)
amp = (self.rbm_am.gamma(v, vp, eta=+1, expand=expand) + cplx.real(pi_)).exp()
phase = self.rbm_ph.gamma(v, vp, eta=-1, expand=expand) + cplx.imag(pi_)
return cplx.make_complex(amp * phase.cos(), amp * phase.sin())
def importance_sampling_numerator(self, vp, v):
return self.rho(vp, v, expand=False)
def importance_sampling_denominator(self, v):
return cplx.make_complex(self.probability(v))
def rotated_gradient(self, basis, sample):
r"""Computes the gradients rotated into the measurement basis
:param basis: The bases in which the measurement is made
:type basis: numpy.ndarray
:param sample: The measurement (either 0 or 1)
:type sample: torch.Tensor
:returns: A list of two tensors, representing the rotated gradients
of the amplitude and phase RBMs
:rtype: list[torch.Tensor, torch.Tensor]
"""
UrhoU, UrhoU_v, v = unitaries.rotate_rho_probs(
self, basis, sample, include_extras=True
)
inv_UrhoU = 1 / (UrhoU + 1e-8) # avoid dividing by zero
raw_grads = [self.am_grads(v), self.ph_grads(v)]
rotated_grad = [
-cplx.einsum("ijb,ijbg->bg", UrhoU_v, g, imag_part=False) for g in raw_grads
]
return [torch.einsum("b,bg->g", inv_UrhoU, g) for g in rotated_grad]
def am_grads(self, v):
r"""Computes the gradients of the amplitude RBM for given input states
:param v: The first input state, :math:`\sigma`
:type v: torch.Tensor
:returns: The gradients of all amplitude RBM parameters
:rtype: torch.Tensor
"""
return self.rbm_am.gamma_grad(v, v, eta=+1, expand=True) + self.pi_grad(
v, v, phase=False, expand=True
)
|
:param v: The first input state, :math:`\sigma`
:type v: torch.Tensor
:returns: The gradients of all phase RBM parameters
:rtype: torch.Tensor
"""
return cplx.scalar_mult( # need to multiply Gamma- by i
self.rbm_ph.gamma_grad(v, v, eta=-1, expand=True), cplx.I
) + self.pi_grad(v, v, phase=True, expand=True)
def fit(
self,
data,
epochs=100,
pos_batch_size=100,
neg_batch_size=None,
k=1,
lr=1,
input_bases=None,
progbar=False,
starting_epoch=1,
time=False,
callbacks=None,
optimizer=torch.optim.SGD,
optimizer_args=None,
scheduler=None,
scheduler_args=None,
**kwargs,
):
if input_bases is None:
raise ValueError("input_bases must be provided to train a DensityMatrix!")
else:
super().fit(
data=data,
epochs=epochs,
pos_batch_size=pos_batch_size,
neg_batch_size=neg_batch_size,
k=k,
lr=lr,
input_bases=input_bases,
progbar=progbar,
starting_epoch=starting_epoch,
time=time,
callbacks=callbacks,
optimizer=optimizer,
optimizer_args=optimizer_args,
scheduler=scheduler,
scheduler_args=scheduler_args,
**kwargs,
)
@staticmethod
def autoload(location, gpu=False):
state_dict = torch.load(location)
nn_state = DensityMatrix(
unitary_dict=state_dict["unitary_dict"],
num_visible=len(state_dict["rbm_am"]["visible_bias"]),
num_hidden=len(state_dict["rbm_am"]["hidden_bias"]),
num_aux=len(state_dict["rbm_am"]["aux_bias"]),
gpu=gpu,
)
nn_state.load(location)
return nn_state
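# A minimal usage sketch (hypothetical file name; assumes a state previously
# written with nn_state.save(...)):
#
#     nn_state = DensityMatrix.autoload("density_matrix_state.pt", gpu=False)
#     v = torch.zeros(1, nn_state.num_visible)
#     diag = nn_state.rho(v, expand=False)  # diagonal element <v|rho|v>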
|
def ph_grads(self, v):
r"""Computes the gradients of the phase RBM for given input states
|
GenderTransgenderIcon.js
|
'use strict';
function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; }
var React = _interopDefault(require('react'));
var _extends = Object.assign || function (target) {
for (var i = 1; i < arguments.length; i++) {
var source = arguments[i];
for (var key in source) {
if (Object.prototype.hasOwnProperty.call(source, key)) {
target[key] = source[key];
}
}
}
return target;
};
var objectWithoutProperties = function (obj, keys) {
var target = {};
for (var i in obj) {
if (keys.indexOf(i) >= 0) continue;
if (!Object.prototype.hasOwnProperty.call(obj, i)) continue;
target[i] = obj[i];
}
return target;
};
var GenderTransgenderIcon = function GenderTransgenderIcon(_ref) {
var _ref$color = _ref.color,
color = _ref$color === undefined ? 'currentColor' : _ref$color,
_ref$size = _ref.size,
size = _ref$size === undefined ? 24 : _ref$size,
|
return React.createElement(
'svg',
_extends({}, props, { className: className, width: size, height: size, fill: color, viewBox: '0 0 24 24' }),
React.createElement('path', { d: 'M19.58,3H15V1H23V9H21V4.41L16.17,9.24C16.69,10.03 17,11 17,12C17,14.42 15.28,16.44 13,16.9V19H15V21H13V23H11V21H9V19H11V16.9C8.72,16.44 7,14.42 7,12C7,11 7.3,10.04 7.82,9.26L6.64,8.07L5.24,9.46L3.83,8.04L5.23,6.65L3,4.42V8H1V1H8V3H4.41L6.64,5.24L8.08,3.81L9.5,5.23L8.06,6.66L9.23,7.84C10,7.31 11,7 12,7C13,7 13.96,7.3 14.75,7.83L19.58,3M12,9A3,3 0 0,0 9,12A3,3 0 0,0 12,15A3,3 0 0,0 15,12A3,3 0 0,0 12,9Z' })
);
};
var GenderTransgenderIcon$1 = React.memo ? React.memo(GenderTransgenderIcon) : GenderTransgenderIcon;
module.exports = GenderTransgenderIcon$1;
|
children = _ref.children,
props = objectWithoutProperties(_ref, ['color', 'size', 'children']);
var className = 'mdi-icon ' + (props.className || '');
|
lib.rs
|
extern crate proc_macro;
use crate::proc_macro::TokenStream;
use quote::quote;
use inflector::cases::pascalcase::to_pascal_case;
use syn::{parse_macro_input, Attribute, Data, DataStruct, DeriveInput, Fields, Ident};
fn get_sql_column_names(field_identifiers: &Vec<&Option<Ident>>) -> String {
// Adapt rust naming conventions to match the table columns.
// my_struct_field (Rust) -> MyStructField (SQL column)
let field_names: Vec<String> = field_identifiers
.iter()
.map(|n| match n {
Some(e) => to_pascal_case(&e.to_string()),
_ => "".to_owned(),
})
.collect();
field_names.join(",")
}
fn get_table_name(implementing_type: &Ident, attrs: &Vec<Attribute>) -> String {
for attr in attrs {
if attr.path.is_ident("table") {
let lit: syn::LitStr = attr.parse_args().unwrap();
return lit.value();
}
}
// Use the type's name if no name was specified.
implementing_type.to_string()
}
fn get_sql_text(table_name: &str, column_names: &str, column_count: usize) -> String {
// Construct the SQL statement to insert the record.
let value_specifiers = vec!["?"; column_count].join(",");
format!(
"INSERT INTO {}({}) VALUES({})",
table_name, column_names, value_specifiers
)
}
#[proc_macro_derive(SqlInsert, attributes(table))]
pub fn sql_insert_derive(input: TokenStream) -> TokenStream
|
{
let input = parse_macro_input!(input as DeriveInput);
let implementing_type = &input.ident;
let fields = match &input.data {
Data::Struct(DataStruct {
fields: Fields::Named(fields),
..
}) => &fields.named,
_ => panic!("expected a struct with named fields"),
};
let field_identifiers: Vec<&Option<Ident>> = fields.iter().map(|field| &field.ident).collect();
let column_names = get_sql_column_names(&field_identifiers);
let table_name = get_table_name(implementing_type, &input.attrs);
let sql_text = get_sql_text(&table_name, &column_names, field_identifiers.len());
// Create the trait implementation.
let expanded = quote! {
impl crate::records::SqlInsert for #implementing_type {
fn insert(&self, transaction: &mut Transaction) -> Result<()> {
transaction
.prepare_cached(
#sql_text,
)?
.execute(params![
#(
self.#field_identifiers,
)*
])?;
Ok(())
}
}
};
TokenStream::from(expanded)
}
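// A minimal usage sketch (hypothetical struct; assumes a rusqlite-style
// `Transaction`/`params!` and the crate's `records::SqlInsert` trait):
//
//     #[derive(SqlInsert)]
//     #[table("LogEntry")]
//     struct LogEntry {
//         event_id: i64,    // maps to column `EventId`
//         message: String,  // maps to column `Message`
//     }
//
//     // The generated implementation executes:
//     //     INSERT INTO LogEntry(EventId,Message) VALUES(?,?)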
|
|
factory.py
|
#!/usr/bin/python3
import json
from iot_message.message import Message
import iot_message.exception as ex
class MessageFactory(object):
"""Class MessageFactory"""
@classmethod
def create(cls, data=None):
if data is None:
return Message()
else:
return cls._decode(data)
@classmethod
def
|
(cls, message):
try:
message = json.loads(message)
if not cls._validate_message(message):
return None
msg = Message()
msg.set(message)
msg.decrypt()
return msg
except ValueError:
raise ex.JsonException()
@classmethod
def _validate_message(cls, message):
""":return boolean"""
if 'protocol' not in message or 'targets' not in message or \
type(message['targets']) is not list:
return False
if message['protocol'] != Message.protocol:
return False
if Message.node_name not in message['targets'] and 'ALL' not in message['targets']:
return False
return True
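    # A minimal usage sketch (hypothetical payload; assumes Message.protocol
    # and Message.node_name were configured beforehand):
    #
    #     raw = '{"protocol": "iot:1", "targets": ["ALL"], "event": "ping"}'
    #     msg = MessageFactory.create(raw)  # decode + validate + decrypt
    #     empty = MessageFactory.create()   # a fresh, empty Message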
|
_decode
|
user.go
|
package gorseclient
import "encoding/json"
func (a *API) CreateUser(input InsertUserParam) (*InsertUserResponse, error) {
jsoned, err := json.Marshal(input)
if err != nil {
return nil, err
}
if err := a.newRequest(endpointInsertUser(jsoned)).Error; err != nil {
return nil, err
}
body, err := a.do()
if err != nil {
return nil, err
}
var responseModel InsertUserResponse
err = a.jsonUnmarshal(body, &responseModel)
if err != nil
|
return &responseModel, nil
}
func (a *API) GetUserByUserID(userID string) (*GetUserResponse, error) {
if err := a.newRequest(endpointGetUserByUserID(userID)).Error; err != nil {
return nil, err
}
body, err := a.do()
if err != nil {
return nil, err
}
var responseModel GetUserResponse
err = a.jsonUnmarshal(body, &responseModel)
if err != nil {
return nil, err
}
return &responseModel, nil
}
func (a *API) DeleteUserByUserID(userID string) (*DeleteUserResponse, error) {
if err := a.newRequest(endpointDeleteUserByUserID(userID)).Error; err != nil {
return nil, err
}
body, err := a.do()
if err != nil {
return nil, err
}
var responseModel DeleteUserResponse
err = a.jsonUnmarshal(body, &responseModel)
if err != nil {
return nil, err
}
return &responseModel, nil
}
func (a *API) UpdateUserByUserID(userID string, input UpdateUserParam) (*UpdateUserResponse, error) {
jsoned, err := json.Marshal(input)
if err != nil {
return nil, err
}
if err := a.newRequest(endpointUpdateUserByUserID(userID, jsoned)).Error; err != nil {
return nil, err
}
body, err := a.do()
if err != nil {
return nil, err
}
var responseModel UpdateUserResponse
err = a.jsonUnmarshal(body, &responseModel)
if err != nil {
return nil, err
}
return &responseModel, nil
}
func (a *API) GetUsers(n int, cursor string) (*GetUsersResponse, error) {
if err := a.newRequest(endpointGetUsers(n, cursor)).Error; err != nil {
return nil, err
}
body, err := a.do()
if err != nil {
return nil, err
}
var responseModel GetUsersResponse
err = a.jsonUnmarshal(body, &responseModel)
if err != nil {
return nil, err
}
return &responseModel, nil
}
func (a *API) CreateUsers(input []*InsertUserParam) (*InsertUsersResponse, error) {
jsoned, err := json.Marshal(input)
if err != nil {
return nil, err
}
if err := a.newRequest(endpointInsertUsers(jsoned)).Error; err != nil {
return nil, err
}
body, err := a.do()
if err != nil {
return nil, err
}
var responseModel InsertUsersResponse
err = a.jsonUnmarshal(body, &responseModel)
if err != nil {
return nil, err
}
return &responseModel, nil
}
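// A minimal usage sketch (hypothetical constructor and field names; assumes
// an API value pointed at a running gorse server):
//
//	api := NewAPI("http://localhost:8087")
//	resp, err := api.CreateUser(InsertUserParam{UserId: "u1"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(resp)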
|
{
return nil, err
}
|
net_stream.py
|
from .net_stream_interface import INetStream
class NetStream(INetStream):
def __init__(self, muxed_stream):
|
def get_protocol(self):
"""
:return: protocol id that stream runs on
"""
return self.protocol_id
def set_protocol(self, protocol_id):
"""
:param protocol_id: protocol id that stream runs on
"""
self.protocol_id = protocol_id
async def read(self):
"""
read from stream
:return: bytes of input until EOF
"""
return await self.muxed_stream.read()
async def write(self, data):
"""
write to stream
:return: number of bytes written
"""
return await self.muxed_stream.write(data)
async def close(self):
"""
close stream
:return: true if successful
"""
await self.muxed_stream.close()
return True
|
self.muxed_stream = muxed_stream
self.mplex_conn = muxed_stream.mplex_conn
self.protocol_id = None
|
pseudomerlin.rs
|
use digest::{generic_array, Digest};
use generic_array::typenum::{IsGreaterOrEqual, B1, U32};
use mc_crypto_digestible::DigestTranscript;
/// An object which implements the DigestTranscript API over a cryptographic
/// digest function.
///
/// `append_bytes(context, data)` is implemented by providing framing for
/// context, then appending it, then providing framing for data, then appending
/// it.
///
/// As long as the chosen digest function used is actually collision-resistant,
/// then this can be used as a drop-in for MerlinTranscript, for purposes of
/// Digestible crate.
///
/// This is not and cannot be a fully-general drop-in for Merlin transcripts,
/// especially when multiple rounds of challenge-bytes extraction are taking
/// place.
///
/// The best use-case for something like this is when e.g. you MUST create an
/// ed25519ph signature, and MUST have a SHA512 hasher into which your structure
/// has been correctly marshalled.
pub struct PseudoMerlin<D>
where
D: Digest,
D::OutputSize: IsGreaterOrEqual<U32, Output = B1>,
{
pub inner: D,
}
#[allow(non_snake_case)]
#[inline]
pub fn PseudoMerlin<D>(digest: D) -> PseudoMerlin<D>
where
D: Digest,
D::OutputSize: IsGreaterOrEqual<U32, Output = B1>,
{
PseudoMerlin { inner: digest }
}
impl<D> DigestTranscript for PseudoMerlin<D>
where
D: Digest,
D::OutputSize: IsGreaterOrEqual<U32, Output = B1>,
{
#[inline]
fn new() -> Self {
Self { inner: D::new() }
}
#[inline]
fn append_bytes(&mut self, context: &'static [u8], data: impl AsRef<[u8]>)
|
#[inline]
fn extract_digest(self, output: &mut [u8; 32]) {
let result = self.inner.finalize();
output.copy_from_slice(&result[..32]);
}
}
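// A minimal usage sketch (assumes the `sha2` crate; Sha512's 64-byte output
// satisfies the `OutputSize >= U32` bound):
//
//     use sha2::Sha512;
//     let mut transcript = PseudoMerlin(Sha512::new());
//     transcript.append_bytes(b"context", b"payload");
//     let mut out = [0u8; 32];
//     transcript.extract_digest(&mut out);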
|
{
// This is meant to closely mimic merlin's STROBE updates
// https://merlin.cool/transcript/ops.html#appending-messages
self.inner.update((context.len() as u32).to_le_bytes());
self.inner.update(context);
let data = data.as_ref();
self.inner.update((data.len() as u32).to_le_bytes());
self.inner.update(data);
}
|
migration.go
|
package m87to88
import (
"embed"
"github.com/pkg/errors"
"github.com/stackrox/rox/generated/storage"
"github.com/stackrox/rox/migrator/migrations"
"github.com/stackrox/rox/migrator/migrations/policymigrationhelper"
"github.com/stackrox/rox/migrator/types"
bolt "go.etcd.io/bbolt"
)
var (
migration = types.Migration{
StartingSeqNum: 87,
VersionAfter: storage.Version{SeqNum: 88},
Run: func(databases *types.Databases) error {
err := updatePolicies(databases.BoltDB)
if err != nil {
return errors.Wrap(err, "updating policies")
}
return nil
},
}
//go:embed policies_before_and_after
policyDiffFS embed.FS
// We will want to migrate only if the existing policy sections and title haven't changed.
fieldsToCompare = []policymigrationhelper.FieldComparator{
policymigrationhelper.PolicySectionComparator,
policymigrationhelper.NameComparator,
}
policyDiffs = []policymigrationhelper.PolicyDiff{
{
FieldsToCompare: fieldsToCompare,
PolicyFileName: "access_central_secret.json",
},
}
)
func updatePolicies(db *bolt.DB) error
|
func init() {
migrations.MustRegisterMigration(migration)
}
|
{
return policymigrationhelper.MigratePoliciesWithDiffs(db, policyDiffFS, policyDiffs)
}
|
plttraj.py
|
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.size'] = 9
ndim = 6
data = np.genfromtxt('dOTD_tst1.out')
xticks = [900, 1100, 1300]
yticks = [[0.7, 0.8, 0.9, 1],
[-0.2, 0, 0.2, 0.4],
[-0.5, 0, 0.5],
[-1, -0.5, 0],
[-0.5, 0, 0.5],
[-0.5, 0, 0.5, 1]]
def
|
(ticklabels):
"""Manually set LaTeX format for tick labels."""
return [r"$" + str(label) + "$" for label in ticklabels]
for ii in range(ndim):
fig = plt.figure(figsize=(2.2,1.3), constrained_layout=True)
fig.set_constrained_layout_pads(w_pad=0, h_pad=0)
ax = plt.axes()
plt.plot(data[:,0], data[:,ii+1], 'k-', linewidth=0.75)
plt.xlabel('$t$')
plt.ylabel('$z_{' + str(ii+1) + '}$')
plt.xlim(xticks[0], xticks[-1])
plt.ylim(yticks[ii][0], yticks[ii][-1])
ax.set_xticks(xticks)
ax.set_yticks(yticks[ii])
ax.set_xticklabels(latexify(xticks))
ax.set_yticklabels(latexify(yticks[ii]))
ax.yaxis.set_label_coords(-0.2, 0.5)
ax.tick_params(direction='in', length=2)
plt.savefig('traj' + str(ii+1) + '.pdf')
|
latexify
|
order-by.module.ts
|
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { ImmutableService } from '../shared/immutable.service';
import { OrderByPipe } from './order-by.pipe';
import { OrderByObjectPipe } from './order-by-object.pipe';
import { OrderByService } from './order-by.service';
@NgModule({
imports: [
CommonModule,
],
declarations: [
OrderByPipe,
|
exports: [
OrderByPipe,
OrderByObjectPipe,
],
providers: [
ImmutableService,
OrderByService,
]
})
export class OrderByModule { }
|
OrderByObjectPipe,
],
|
day7.rs
|
use std::{cmp, str::FromStr};
|
use aoc_runner_derive::aoc;
pub fn input_generator(input: &str) -> Vec<i32> {
input
.trim()
.split(",")
.map(|v| i32::from_str(v).unwrap())
.collect::<Vec<i32>>()
}
#[aoc(day7, part1)]
pub fn part1(input: &str) -> i32 {
let pos = input_generator(input);
let mut buckets = vec![0; 2000];
for p in pos {
buckets[p as usize] += 1;
}
let mut minimum = i32::MAX;
for i in 0..=2000 {
let total = buckets
.iter()
.enumerate()
.fold(0, |acc, (p, v)| acc + ((p as i32 - i).abs() * v));
minimum = cmp::min(minimum, total);
}
minimum
}
#[aoc(day7, part2)]
pub fn part2(input: &str) -> i32 {
let pos = input_generator(input);
let mut buckets = vec![0; 2000];
for p in pos {
buckets[p as usize] += 1;
}
let mut minimum = i32::MAX;
'a: for i in 0..=2000 {
let mut total = 0;
for (p, t) in buckets.iter().enumerate() {
if *t == 0 {
continue;
}
let n = (p as i32 - i).abs();
// for j in 1..=(p as i32 - i).abs() {
// total += j * t;
// }
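            // Closed form of the commented loop: 1 + 2 + ... + n = n * (n + 1) / 2.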
total += ((n * (n + 1)) / 2) * t;
if total > minimum {
continue 'a;
}
}
minimum = cmp::min(minimum, total);
}
minimum
}
#[cfg(test)]
mod tests {
use indoc::indoc;
use super::*;
static EXAMPLE: &'static str = indoc! {"
16,1,2,0,4,2,7,1,2,14
"};
#[test]
fn example_part1() {
assert_eq!(part1(&EXAMPLE), 37);
}
#[test]
fn example_part2() {
assert_eq!(part2(&EXAMPLE), 168);
}
}
| |
non_spatial.rs
|
use anyhow::{Context, Result};
#[cfg(feature = "necsim-classical")]
use necsim_classical::ClassicalSimulation;
#[cfg(feature = "necsim-cuda")]
use necsim_cuda::CudaSimulation;
#[cfg(feature = "necsim-gillespie")]
use necsim_gillespie::GillespieSimulation;
#[cfg(feature = "necsim-skipping-gillespie")]
use necsim_skipping_gillespie::SkippingGillespieSimulation;
#[cfg(feature = "necsim-independent")]
use necsim_independent::IndependentSimulation;
use necsim_impls_no_std::reporter::ReporterContext;
#[allow(unused_imports)]
use necsim_impls_no_std::simulation::non_spatial::NonSpatialSimulation;
use necsim_impls_no_std::partitioning::LocalPartition;
#[allow(unused_imports)]
use crate::args::{Algorithm, CommonArgs, NonSpatialArgs};
#[allow(unreachable_code)]
#[allow(unused_variables)]
#[allow(clippy::needless_pass_by_value)]
pub fn
|
<R: ReporterContext, P: LocalPartition<R>>(
common_args: CommonArgs,
non_spatial_args: NonSpatialArgs,
local_partition: &mut P,
) -> Result<(f64, u64)> {
info!(
"Setting up the non-spatial {} coalescence algorithm ...",
common_args.algorithm
);
#[allow(clippy::match_single_binding)]
#[allow(clippy::map_err_ignore)]
let result: Result<(f64, u64)> = match common_args.algorithm {
#[cfg(feature = "necsim-classical")]
Algorithm::Classical => ClassicalSimulation::simulate(
non_spatial_args.area,
non_spatial_args.deme,
common_args.speciation_probability_per_generation.get(),
common_args.sample_percentage.get(),
common_args.seed,
local_partition,
(),
)
.map_err(|_| unreachable!("Non-Spatial ClassicalSimulation can never fail.")),
#[cfg(feature = "necsim-gillespie")]
Algorithm::Gillespie => GillespieSimulation::simulate(
non_spatial_args.area,
non_spatial_args.deme,
common_args.speciation_probability_per_generation.get(),
common_args.sample_percentage.get(),
common_args.seed,
local_partition,
(),
)
.map_err(|_| unreachable!("Non-Spatial GillespieSimulation can never fail.")),
#[cfg(feature = "necsim-skipping-gillespie")]
Algorithm::SkippingGillespie(auxiliary) => SkippingGillespieSimulation::simulate(
non_spatial_args.area,
non_spatial_args.deme,
common_args.speciation_probability_per_generation.get(),
common_args.sample_percentage.get(),
common_args.seed,
local_partition,
auxiliary,
)
.map_err(|_| unreachable!("Non-Spatial SkippingGillespieSimulation can never fail.")),
#[cfg(feature = "necsim-cuda")]
Algorithm::Cuda(auxiliary) => CudaSimulation::simulate(
non_spatial_args.area,
non_spatial_args.deme,
common_args.speciation_probability_per_generation.get(),
common_args.sample_percentage.get(),
common_args.seed,
local_partition,
auxiliary,
),
#[cfg(feature = "necsim-independent")]
Algorithm::Independent(auxiliary) => IndependentSimulation::simulate(
non_spatial_args.area,
non_spatial_args.deme,
common_args.speciation_probability_per_generation.get(),
common_args.sample_percentage.get(),
common_args.seed,
local_partition,
auxiliary,
)
.map_err(|_| unreachable!("Non-Spatial IndependentSimulation can never fail.")),
#[allow(unreachable_patterns)]
_ => anyhow::bail!("rustcoalescence does not support the selected algorithm"),
};
result.with_context(|| "Failed to run the non-spatial simulation.")
}
|
simulate
|
setup.py
|
import setuptools
setuptools.setup(
name="sb6183_exporter",
version="0.0.1",
author="Steven Brudenell",
author_email="[email protected]",
packages=setuptools.find_packages(),
|
"prometheus_client>=0.2.0",
],
entry_points={
"console_scripts": [
"sb6183_exporter = sb6183_exporter:exporter_main",
],
},
)
|
install_requires=[
"requests>=2.18.4",
"beautifulsoup4>=4.6.0",
|
take.py
|
class Take(object):
def __init__(self, stage, unit, entity,
not_found_proc, finished_proc):
self._stage = stage
self._unit = unit
self._entity = entity
self._finished_proc = finished_proc
self._not_found_proc = not_found_proc
def enact(self):
if not self._entity.location \
or self._entity.location != (self._unit.x, self._unit.y):
self._not_found_proc()
return
self._entity.location = None
self._stage.delete_entity(self._entity)
|
self._finished_proc()
return
|
|
proj-regression-EPSG-3857-20.py
|
from Magics.macro import *
import os
def plot_area(epsg, llx, lly, urx, ury):
img = os.path.basename(__file__).split('.')[0]
title = "Projection {} : [{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(epsg, llx, lly, urx, ury)
#Setting output
png = output(
|
#Setting the geographical area
area = mmap(
subpage_lower_left_latitude = lly,
subpage_lower_left_longitude = llx,
subpage_map_projection = epsg,
subpage_upper_right_latitude = ury,
subpage_upper_right_longitude = urx,
subpage_map_area_definition = "corners"
)
#Setting the coastlines
background = mcoast(
map_coastline_land_shade = 'on',
map_coastline_resolution = "medium",
map_coastline_land_shade_colour = 'cream')
#Picking the grib metadata
title = mtext(
text_lines = [title],
text_justification = 'left',
text_font_size = 0.6,
text_colour = 'charcoal')
#Plotting
plot(png,area,background,title,)
plot_area("EPSG:3857", -19.537526614209707, 21.73608176192727, 45.466740592414304, 81.98066721424705 )
|
output_formats = ['png'],
output_name = img,
output_name_first_page_number = 'off')
|
_functional.py
|
r"""Functional interface, port from torch/optim/_function.py"""
import torch
from torch import Tensor
from typing import List, Optional
def is_master_weight(param, params_attr):
return (
param.dtype == torch.float and
param in params_attr and
'bf16_param' in params_attr[param]
)
def get_bf16_grad(param, params_attr):
assert is_master_weight(param, params_attr)
return params_attr[param]['bf16_param'].grad
def _make_sparse(grad, grad_indices, values):
size = grad.size()
if grad_indices.numel() == 0 or values.numel() == 0:
return torch.empty_like(grad)
return torch.sparse_coo_tensor(grad_indices, values, size)
def _adagrad_impl(
params: List[Tensor],
grads: List[Tensor],
state_sums: List[Tensor],
state_steps: List[int],
attr: dict,
lr: float,
weight_decay: float,
lr_decay: float,
eps: float,
fused: bool,
):
r"""Functional API that performs Adagrad algorithm computation.
See :class:`~torch.optim.Adagrad` for details.
"""
for (param, grad, state_sum, step) in zip(params, grads, state_sums, state_steps):
param2 = torch.Tensor()
if param in attr:
if 'trail' in attr[param]:
assert param.dtype is torch.bfloat16
param2 = attr[param]['trail']
if 'bf16_param' in attr[param]:
assert param.dtype is torch.float
param2 = attr[param]['bf16_param']
if fused and not grad.is_sparse:
torch.ops.torch_ipex.adagrad_fused_step(
param,
grad,
state_sum,
param2,
step,
lr,
weight_decay,
lr_decay,
eps)
continue
if weight_decay != 0:
if grad.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
grad = grad.add(param, alpha=weight_decay)
clr = lr / (1 + (step - 1) * lr_decay)
if grad.is_sparse:
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2)))
std = state_sum.sparse_mask(grad)
std_values = std._values().sqrt_().add_(eps)
param.add_(_make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr)
else:
state_sum.addcmul_(grad, grad, value=1)
std = state_sum.sqrt().add_(eps)
param.addcdiv_(grad, std, value=-clr)
|
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
state_sums = []
state_steps = []
for p in group['params']:
grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad
if grad is not None:
params_with_grad.append(p)
grads.append(grad)
state = self.state[p]
state_sums.append(state['sum'])
# update the steps for each param group update
state['step'] += 1
# record the step after step update
state_steps.append(state['step'])
_adagrad_impl(
params_with_grad,
grads,
state_sums,
state_steps,
self.params_attr,
group['lr'],
group['weight_decay'],
group['lr_decay'],
group['eps'],
self.fused)
return loss
def _sgd_non_fused_micro_step(
    param: Tensor,
    d_p: Tensor,
    momentum_buffer: Optional[Tensor],
    momentum: float,
    lr: float,
    weight_decay: float,
    dampening: float,
    nesterov: bool,
):
    # Naive single-parameter SGD step; returns the (possibly newly created)
    # momentum buffer so the caller can store it back into its list.
    if weight_decay != 0:
        d_p = d_p.add(param, alpha=weight_decay)
    if momentum != 0:
        buf = momentum_buffer
        if buf is None:
            buf = torch.clone(d_p).detach()
            momentum_buffer = buf
        else:
            buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
        if nesterov:
            d_p = d_p.add(buf, alpha=momentum)
        else:
            d_p = buf
    param.add_(d_p, alpha=-lr)
    return momentum_buffer
def _sgd_impl(
params: List[Tensor],
d_p_list: List[Tensor],
attr: dict,
momentum_buffer_list: List[Optional[Tensor]],
*,
weight_decay: float,
momentum: float,
lr: float,
dampening: float,
nesterov: bool,
fused: bool
):
r"""Functional API that performs SGD algorithm computation.
See :class:`~torch.optim.SGD` for details.
"""
for i, param in enumerate(params):
d_p = d_p_list[i]
param2 = torch.Tensor()
if param in attr:
if 'trail' in attr[param]:
assert param.dtype is torch.bfloat16
param2 = attr[param]['trail']
if 'bf16_param' in attr[param]:
assert param.dtype is torch.float
param2 = attr[param]['bf16_param']
if fused and not d_p.is_sparse:
momentum_buffer_list[i] = torch.ops.torch_ipex.sgd_fused_step(
param,
d_p,
momentum_buffer_list[i],
param2,
momentum,
lr,
weight_decay,
dampening,
nesterov)
continue
if (
d_p.is_sparse and
d_p.dtype == torch.bfloat16 and
weight_decay == 0 and
momentum == 0
):
# packed_add can support sparse tensor
torch.ops.torch_ipex.packed_add(param, param2, d_p, alpha=-lr)
else:
# no special optimize for other non fused case, fall back to naive implementation
d_p = d_p.to(param.dtype)
            momentum_buffer_list[i] = _sgd_non_fused_micro_step(
                param,
                d_p,
                momentum_buffer_list[i],
                momentum,
                lr,
                weight_decay,
                dampening,
                nesterov,
            )
@torch.no_grad()
def sgd_step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
d_p_list = []
momentum_buffer_list = []
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
lr = group['lr']
for p in group['params']:
grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad
if grad is not None:
params_with_grad.append(p)
d_p_list.append(grad)
state = self.state[p]
if 'momentum_buffer' not in state:
momentum_buffer_list.append(None)
else:
momentum_buffer_list.append(state['momentum_buffer'])
_sgd_impl(
params_with_grad,
d_p_list,
self.params_attr,
momentum_buffer_list,
weight_decay=weight_decay,
momentum=momentum,
lr=lr,
dampening=dampening,
nesterov=nesterov,
fused=self.fused)
# update momentum_buffers in state
for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
state = self.state[p]
state['momentum_buffer'] = momentum_buffer
return loss
def _lamb_fused_impl(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
attr: dict,
state_steps: List[int],
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
):
r"""Functional API that performs Lamb algorithm computation.
See :class:`~torch.optim.Lamb` for details.
"""
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
param2 = torch.Tensor()
if param in attr:
if 'trail' in attr[param]:
assert param.dtype is torch.bfloat16
param2 = attr[param]['trail']
if 'bf16_param' in attr[param]:
assert param.dtype is torch.float
param2 = attr[param]['bf16_param']
torch.ops.torch_ipex.lamb_fused_step(
param,
exp_avg,
exp_avg_sq,
grad,
param2,
step,
beta1,
beta2,
lr,
weight_decay,
eps)
def _lamb_impl(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[int],
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
):
r"""Functional API that performs Lamb algorithm computation.
"""
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
grad = grad.to(exp_avg.dtype)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
adam_step = (exp_avg / bias_correction1) / ((exp_avg_sq / bias_correction2).sqrt() + eps)
if weight_decay != 0:
adam_step.add_(param, alpha=weight_decay)
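# LAMB trust ratio: scale the Adam-style step by ||w|| / ||step|| so each
# layer takes an update proportional to its own weight norm.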
weight_norm = param.norm(p=2)
rtw_norm = adam_step.norm(p=2)
true_ratio = weight_norm / rtw_norm
param.add_(adam_step, alpha=-lr * true_ratio)
@torch.no_grad()
def lamb_step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
trails = []
state_steps = []
for p in group['params']:
grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad
if grad is not None:
params_with_grad.append(p)
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients')
if grad.device != torch.device('cpu'):
raise RuntimeError('Lamb supports only CPU device')
grads.append(grad)
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
state['step'] = 0
buffer_dtype = p.dtype if p.dtype is torch.float64 else torch.float
state['exp_avg'] = torch.zeros(p.shape, dtype=buffer_dtype)
state['exp_avg_sq'] = torch.zeros(p.shape, dtype=buffer_dtype)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
# update the steps for each param group update
state['step'] += 1
# record the step after step update
state_steps.append(state['step'])
beta1, beta2 = group['betas']
_lamb_fused_impl(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
self.params_attr,
state_steps,
beta1,
beta2,
group['lr'],
group['weight_decay'],
group['eps'])
return loss
|
@torch.no_grad()
def adagrad_step(self, closure=None):
|
pandora.py
|
"""
Component for controlling Pandora stations through the pianobar client.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/media_player.pandora/
"""
import logging
import re
import os
import signal
from datetime import timedelta
import shutil
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, MEDIA_TYPE_MUSIC,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_PLAY,
SUPPORT_SELECT_SOURCE, SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PLAY, SERVICE_VOLUME_UP, SERVICE_VOLUME_DOWN,
MediaPlayerDevice)
from homeassistant.const import (STATE_OFF, STATE_PAUSED, STATE_PLAYING,
STATE_IDLE)
from homeassistant import util
REQUIREMENTS = ['pexpect==4.0.1']
_LOGGER = logging.getLogger(__name__)
# SUPPORT_VOLUME_SET is close to available but we need volume up/down
# controls in the GUI.
PANDORA_SUPPORT = \
SUPPORT_PAUSE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_NEXT_TRACK | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
CMD_MAP = {SERVICE_MEDIA_NEXT_TRACK: 'n',
SERVICE_MEDIA_PLAY_PAUSE: 'p',
SERVICE_MEDIA_PLAY: 'p',
SERVICE_VOLUME_UP: ')',
SERVICE_VOLUME_DOWN: '('}
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=2)
CURRENT_SONG_PATTERN = re.compile(r'"(.*?)"\s+by\s+"(.*?)"\son\s+"(.*?)"',
re.MULTILINE)
STATION_PATTERN = re.compile(r'Station\s"(.+?)"', re.MULTILINE)
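# The patterns above match pianobar's status lines, which look roughly like:
#   "Song Title" by "Artist" on "Album"
#   Station "QuickMix"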
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the media player pandora platform."""
if not _pianobar_exists():
return False
pandora = PandoraMediaPlayer('Pandora')
# make sure we end the pandora subprocess on exit in case user doesn't
# power it down.
def _stop_pianobar(_event):
pandora.turn_off()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_pianobar)
add_devices([pandora])
class PandoraMediaPlayer(MediaPlayerDevice):
"""A media player that uses the Pianobar interface to Pandora."""
def __init__(self, name):
"""Initialize the demo device."""
MediaPlayerDevice.__init__(self)
self._name = name
self._player_state = STATE_OFF
self._station = ''
self._media_title = ''
self._media_artist = ''
self._media_album = ''
self._stations = []
self._time_remaining = 0
self._media_duration = 0
self._pianobar = None
@property
def should_poll(self):
"""Should be polled for current state."""
return True
@property
def name(self):
"""Return the name of the media player."""
return self._name
@property
def state(self):
"""Return the state of the player."""
return self._player_state
def turn_on(self):
"""Turn the media player on."""
import pexpect
if self._player_state != STATE_OFF:
return
self._pianobar = pexpect.spawn('pianobar')
_LOGGER.info('Started pianobar subprocess')
mode = self._pianobar.expect(['Receiving new playlist',
'Select station:',
'Email:'])
if mode == 1:
# station list was presented. dismiss it.
self._pianobar.sendcontrol('m')
elif mode == 2:
_LOGGER.warning('The pianobar client is not configured to log in. '
'Please create a config file for it as described '
'at https://home-assistant.io'
'/components/media_player.pandora/')
# pass through the email/password prompts to quit cleanly
self._pianobar.sendcontrol('m')
self._pianobar.sendcontrol('m')
self._pianobar.terminate()
self._pianobar = None
return
self._update_stations()
self.update_playing_status()
self._player_state = STATE_IDLE
self.schedule_update_ha_state()
def turn_off(self):
"""Turn the media player off."""
import pexpect
if self._pianobar is None:
_LOGGER.info('Pianobar subprocess already stopped')
return
self._pianobar.send('q')
try:
_LOGGER.info('Stopped Pianobar subprocess')
self._pianobar.terminate()
except pexpect.exceptions.TIMEOUT:
# kill the process group
os.killpg(os.getpgid(self._pianobar.pid), signal.SIGTERM)
_LOGGER.info('Killed Pianobar subprocess')
self._pianobar = None
self._player_state = STATE_OFF
self.schedule_update_ha_state()
def media_play(self):
"""Send play command."""
self._send_pianobar_command(SERVICE_MEDIA_PLAY_PAUSE)
self._player_state = STATE_PLAYING
self.schedule_update_ha_state()
def media_pause(self):
"""Send pause command."""
self._send_pianobar_command(SERVICE_MEDIA_PLAY_PAUSE)
self._player_state = STATE_PAUSED
self.schedule_update_ha_state()
def media_next_track(self):
"""Go to next track."""
self._send_pianobar_command(SERVICE_MEDIA_NEXT_TRACK)
self.schedule_update_ha_state()
@property
def supported_features(self):
"""Flag media player features that are supported."""
return PANDORA_SUPPORT
@property
def source(self):
"""Name of the current input source."""
return self._station
@property
def source_list(self):
"""List of available input sources."""
return self._stations
@property
def media_title(self):
"""Title of current playing media."""
self.update_playing_status()
return self._media_title
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._media_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._media_album
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._media_duration
def select_source(self, source):
"""Choose a different Pandora station and play it."""
try:
station_index = self._stations.index(source)
except ValueError:
_LOGGER.warning('Station `%s` is not in list', source)
return
_LOGGER.info('Setting station %s, %d', source, station_index)
self._send_station_list_command()
self._pianobar.sendline('{}'.format(station_index))
self._pianobar.expect('\r\n')
self._player_state = STATE_PLAYING
def _send_station_list_command(self):
"""Send a station list command."""
import pexpect
self._pianobar.send('s')
try:
self._pianobar.expect('Select station:', timeout=1)
except pexpect.exceptions.TIMEOUT:
# try again. Buffer was contaminated.
self._clear_buffer()
self._pianobar.send('s')
self._pianobar.expect('Select station:')
def update_playing_status(self):
"""Query pianobar for info about current media_title, station."""
response = self._query_for_playing_status()
if not response:
return
self._update_current_station(response)
self._update_current_song(response)
self._update_song_position()
def _query_for_playing_status(self):
"""Query system for info about current track."""
import pexpect
self._clear_buffer()
self._pianobar.send('i')
try:
match_idx = self._pianobar.expect([br'(\d\d):(\d\d)/(\d\d):(\d\d)',
'No song playing',
'Select station',
'Receiving new playlist'])
except pexpect.exceptions.EOF:
_LOGGER.info('Pianobar process already exited.')
return None
self._log_match()
if match_idx == 1:
# idle.
response = None
elif match_idx == 2:
# stuck on a station selection dialog. Clear it.
_LOGGER.warning('On unexpected station list page.')
self._pianobar.sendcontrol('m') # press enter
self._pianobar.sendcontrol('m') # do it again b/c an 'i' got in
response = self.update_playing_status()
elif match_idx == 3:
_LOGGER.debug('Received new playlist list.')
response = self.update_playing_status()
else:
response = self._pianobar.before.decode('utf-8')
return response
def _update_current_station(self, response):
"""Update current station."""
station_match = re.search(STATION_PATTERN, response)
if station_match:
self._station = station_match.group(1)
_LOGGER.debug('Got station as: %s', self._station)
else:
_LOGGER.warning('No station match. ')
def _update_current_song(self, response):
"""Update info about current song."""
song_match = re.search(CURRENT_SONG_PATTERN, response)
if song_match:
(self._media_title, self._media_artist,
self._media_album) = song_match.groups()
_LOGGER.debug('Got song as: %s', self._media_title)
else:
_LOGGER.warning('No song match.')
@util.Throttle(MIN_TIME_BETWEEN_UPDATES)
def _update_song_position(self):
"""
Get the song position and duration.
It's hard to predict whether or not the music will start during init
so we have to detect state by checking the ticker.
"""
(cur_minutes, cur_seconds,
total_minutes, total_seconds) = self._pianobar.match.groups()
time_remaining = int(cur_minutes) * 60 + int(cur_seconds)
self._media_duration = int(total_minutes) * 60 + int(total_seconds)
if (time_remaining != self._time_remaining and
time_remaining != self._media_duration):
self._player_state = STATE_PLAYING
elif self._player_state == STATE_PLAYING:
self._player_state = STATE_PAUSED
self._time_remaining = time_remaining
def _log_match(self):
"""Log grabbed values from console."""
_LOGGER.debug('Before: %s\nMatch: %s\nAfter: %s',
repr(self._pianobar.before),
repr(self._pianobar.match),
repr(self._pianobar.after))
def _send_pianobar_command(self, service_cmd):
"""Send a command to Pianobar."""
command = CMD_MAP.get(service_cmd)
_LOGGER.debug('Sending pianobar command %s for %s',
command, service_cmd)
if command is None:
_LOGGER.info('Command %s not supported yet', service_cmd)
self._clear_buffer()
self._pianobar.sendline(command)
def _update_stations(self):
|
def _clear_buffer(self):
"""
Clear buffer from pexpect.
This is necessary because there are a bunch of 00:00 in the buffer
"""
import pexpect
try:
while not self._pianobar.expect('.+', timeout=0.1):
pass
except pexpect.exceptions.TIMEOUT:
pass
except pexpect.exceptions.EOF:
pass
def _pianobar_exists():
"""Verify that Pianobar is properly installed."""
pianobar_exe = shutil.which('pianobar')
if pianobar_exe:
return True
else:
_LOGGER.warning('The Pandora component depends on the Pianobar '
'client, which cannot be found. Please install '
'using instructions at'
'https://home-assistant.io'
'/components/media_player.pandora/')
return False
|
"""List defined Pandora stations."""
self._send_station_list_command()
station_lines = self._pianobar.before.decode('utf-8')
_LOGGER.debug('Getting stations: %s', station_lines)
self._stations = []
for line in station_lines.split('\r\n'):
match = re.search(r'\d+\).....(.+)', line)
if match:
station = match.group(1).strip()
_LOGGER.debug('Found station %s', station)
self._stations.append(station)
else:
_LOGGER.debug('No station match on `%s`', line)
self._pianobar.sendcontrol('m') # press enter with blank line
self._pianobar.sendcontrol('m') # do it twice in case an 'i' got in
|
models.py
|
from django.db import models
# Create your models here.
class Question(models.Model):
|
question_text = models.CharField(max_length = 200)
pub_date = models.DateTimeField("date published")
|
|
interface.rs
|
use crate::discriminative::model as m;
use std::ffi::CString;
use float_ord::FloatOrd;
use friday_inference;
use friday_error::{frierr, propagate, FridayError};
use friday_logging;
use friday_storage;
use friday_web;
use std::path::PathBuf;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use serde_derive::Deserialize;
use serde_json;
#[derive(Deserialize)]
pub struct Config {
export_dir: PathBuf,
sensitivity: f32,
class_map: HashMap<String, i32>
}
pub struct Discriminative {
model: m::Model,
input: m::Tensor,
output: m::Tensor,
class_map: Arc<RwLock<Vec<String>>>,
sensitivity: Arc<RwLock<f32>>
}
fn class_map_to_class_vec(map: HashMap<String, i32>) -> Vec<String> {
// TODO is there a better way to convert it
// We just want to take a map
// "hi: 0
// "why": 1
// "who": 2
// ...
// and convert into
// ["hi", "why", "who"]
// Such that the order is preserved
let mut class_map_mappings_vec: Vec<(String, i32)> = map
.iter()
.map(|k| (k.0.clone(), k.1.clone()))
.collect();
class_map_mappings_vec.sort_by_key(|k| k.1);
// Finally we have the class_map
return class_map_mappings_vec.iter().map(|k| k.0.clone()).collect();
}
impl Discriminative {
pub fn new() -> Result<Discriminative, FridayError> {
return friday_storage::config::get_config("discriminative.json").map_or_else(
propagate!("Failed to create discriminative model"),
Discriminative::model_from_config
);
}
fn model_from_config(config: Config) ->
Result<Discriminative, FridayError> {
let maybe_input = CString::new("input");
let maybe_output = CString::new("output");
let class_map : Vec<String> = class_map_to_class_vec(config.class_map.clone());
return m::Model::new(config.export_dir.as_path())
.map_or_else(
|| frierr!("Failed to create model"),
|model| Discriminative::make_discriminative(
class_map,
config.sensitivity,
model,
maybe_input.unwrap(),
maybe_output.unwrap()));
}
fn make_discriminative(class_map: Vec<String>,
sensitivity: f32,
m: m::Model,
input_cstring: CString,
output_cstring: CString) -> Result<Discriminative, FridayError> {
let input_tensor = m::Tensor::new(&m, &input_cstring);
let output_tensor = m::Tensor::new(&m, &output_cstring);
output_tensor
.dims
.clone()
.first()
.map_or_else(
|| frierr!("Failed to read dimension of output tensor"),
|dim| {
if dim.clone() as usize != class_map.len() {
return frierr!("Class map size ({}) \
not matching output dimension of tensor ({})",
class_map.len(), dim);
}
return Ok(Discriminative {
model: m,
input: input_tensor,
output: output_tensor,
// Storing this behind a lock because the WebDiscriminative might read it
// in another thread.
class_map: Arc::new(RwLock::new(class_map.clone())),
sensitivity: Arc::new(RwLock::new(sensitivity))
});
})
}
}
impl friday_inference::Model for Discriminative {
fn predict(&mut self, v :&Vec<i16>) -> Result<friday_inference::Prediction, FridayError> {
match self.class_map.read() {
Ok(class_map) => match self.sensitivity.read() {
Ok(sensitivity) => {
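// Feed the raw 16-bit audio into the graph and take the argmax over the
// class probabilities; index 0 is reserved for the silence class.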
self.input.set_data(&v);
self.model.run(&mut self.input, &mut self.output);
let probabilities = self.output.get_data::<f32>();
let pred = probabilities
.iter()
.enumerate()
.max_by_key(|k| FloatOrd(k.1.clone()))
.map(|k| k.0)
.expect("failed to get max").clone();
friday_logging::info!("P({}) = {}", class_map[pred], probabilities[pred]);
if pred == 0 {
return Ok(friday_inference::Prediction::Silence);
}
if probabilities[pred] > sensitivity.clone() {
return Ok(friday_inference::Prediction::Result {
class: class_map[pred].clone(),
})
}
return Ok(friday_inference::Prediction::Inconclusive);
},
Err(err) => frierr!("Failed to read RWLocked sensitivity - Reason: {}", err)
},
Err(err) => frierr!("Failed to read RWLocked class_map - Reason: {}", err)
}
}
fn expected_frame_size(&self) -> usize {
return self.input
.dims
.first()
.expect("(tensorflow-models): Failed to extract input dims from model")
.clone() as usize;
}
fn reset(&mut self) -> Result<(), FridayError> {
Ok(())
}
}
pub struct WebDiscriminative{
endpoints: Vec<friday_web::endpoint::Endpoint>,
// These are shared with the Discriminative
class_map: Arc<RwLock<Vec<String>>>,
sensitivity: Arc<RwLock<f32>>
}
impl WebDiscriminative{
pub fn new(d: &Discriminative) -> WebDiscriminative{
WebDiscriminative{
endpoints: vec![
friday_web::endpoint::Endpoint{
name: "classes".to_owned(),
methods: vec![friday_web::core::Method::Get],
path: friday_web::path::Path::safe_new(
"/friday-inference/tensorflow-models/discriminative/classes")
},
friday_web::endpoint::Endpoint{
name: "sensitivity".to_owned(),
methods: vec![friday_web::core::Method::Get],
path: friday_web::path::Path::safe_new(
"/friday-inference/tensorflow-models/discriminative/sensitivity")
}
],
class_map: d.class_map.clone(),
sensitivity: d.sensitivity.clone()
}
}
fn class_map_response(&self) -> Result<friday_web::core::Response, FridayError> {
match self.class_map.read() {
Ok(class_map) => serde_json::to_string(&class_map.clone()).map_or_else(
|err| frierr!(
"Failed to serialize class_map - {:?} - to json - Reason: {}",
class_map,
err),
|content| Ok(friday_web::core::Response::JSON {
status: 200, content
})),
Err(err) => frierr!("Failed to read RWLocked class_map - Reason: {}", err)
}
}
fn sensitivity_response(&self) -> Result<friday_web::core::Response, FridayError> {
match self.sensitivity.read() {
Ok(sensitivity) => serde_json::to_string(&sensitivity.clone()).map_or_else(
|err| frierr!(
"Failed to serialize sensitivity - {:?} - to json - Reason: {}",
sensitivity,
err),
|content| Ok(friday_web::core::Response::JSON {
status: 200, content
})),
Err(err) => frierr!("Failed to read RWLocked sensitivity - Reason: {}", err)
}
}
}
impl friday_web::vendor::Vendor for WebDiscriminative{
fn name(&self) -> String { return "friday-inference/tensorflow-models/discriminative".to_owned() }
fn endpoints(&self) -> Vec<friday_web::endpoint::Endpoint>
|
fn handle(&mut self, r: &mut dyn friday_web::core::FridayRequest) -> Result<friday_web::core::Response, FridayError> {
friday_web::get_name(r, &self.endpoints).map_or_else(
propagate!("Failed to get 'Discriminiative' endpoint for {}", r.url()),
|name| match name.as_str() {
"classes" => self.class_map_response(),
"sensitivity" => self.sensitivity_response(),
_ => frierr!("Unknown endpoint name {}", name)
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use friday_inference::Model;
use std::path::PathBuf;
use std::sync::Mutex;
use ureq;
#[test]
fn discriminative_model() {
let config = Config {
export_dir: PathBuf::from("test-resources/1603634879"),
class_map: [
(String::from("Silence"), 0),
(String::from("a"), 1),
(String::from("b"), 2),
(String::from("c"), 3),
(String::from("d"), 4),
(String::from("e"), 5),
(String::from("f"), 6),
(String::from("g"), 7),
(String::from("h"), 8),
(String::from("i"), 9)].iter().cloned().collect(),
sensitivity: 0.0
};
let mut model = Discriminative::model_from_config(config).expect("Failed to load Config");
let v: Vec<i16> = vec![1; 16000];
let pred: friday_inference::Prediction = model.predict(&v).expect("Failed to predict");
match pred {
friday_inference::Prediction::Result {
class,
} => assert_eq!(class, String::from("Silence")),
friday_inference::Prediction::Silence => friday_logging::info!("Got Silence"),
friday_inference::Prediction::Inconclusive => friday_logging::info!("Got Inconclusive")
}
}
use std::env;
#[test]
fn discriminative_web() {
env::set_var("FRIDAY_GUI", ".");
let config = Config {
export_dir: PathBuf::from("test-resources/1603634879"),
class_map: [
(String::from("Silence"), 0),
(String::from("a"), 1),
(String::from("b"), 2),
(String::from("c"), 3),
(String::from("d"), 4),
(String::from("e"), 5),
(String::from("f"), 6),
(String::from("g"), 7),
(String::from("h"), 8),
(String::from("i"), 9)].iter().cloned().collect(),
sensitivity: 0.0
};
let model = Discriminative::model_from_config(config).expect("Failed to load Config");
let web = WebDiscriminative::new(&model);
let mut server = friday_web::server::Server::new().expect("Failed to create web friday server");
server.register(vec![
Arc::new(Mutex::new(web))
]).expect("Failed to register discriminative web vendor");
let handle = server.listen("0.0.0.0:8000").expect("Failed to start server");
std::thread::sleep(std::time::Duration::from_millis(2000));
let resp = ureq::get(
"http://0.0.0.0:8000/friday-inference/tensorflow-models/discriminative/classes").call();
let class_map : Vec<String> = resp.into_json_deserialize::<Vec<String>>()
.expect("Failed to parse json response");
friday_logging::info!("Got class_map response: {:?}", class_map);
assert_eq!(class_map, model.class_map.read().unwrap().clone());
let resp = ureq::get(
"http://0.0.0.0:8000/friday-inference/tensorflow-models/discriminative/sensitivity").call();
let sensitivity : f32 = resp.into_json_deserialize::<f32>()
.expect("Failed to parse json response");
friday_logging::info!("Got sensitivity response: {:?}", sensitivity);
assert_eq!(sensitivity, model.sensitivity.read().unwrap().clone());
handle.stop();
}
}
|
{ return self.endpoints.clone(); }
|
icon_shopping_basket.rs
|
pub struct IconShoppingBasket {
props: crate::Props,
}
impl yew::Component for IconShoppingBasket {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn
|
(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
}
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0z" fill="none"/><path d="M17.21 9l-4.38-6.56c-.19-.28-.51-.42-.83-.42-.32 0-.64.14-.83.43L6.79 9H2c-.55 0-1 .45-1 1 0 .09.01.18.04.27l2.54 9.27c.23.84 1 1.46 1.92 1.46h13c.92 0 1.69-.62 1.93-1.46l2.54-9.27L23 10c0-.55-.45-1-1-1h-4.79zM9 9l3-4.4L15 9H9zm3 8c-1.1 0-2-.9-2-2s.9-2 2-2 2 .9 2 2-.9 2-2 2z"/></svg>
</svg>
}
}
}
|
update
|
glog.go
|
//go:generate depstubber -vendor github.com/golang/glog "" Error,ErrorDepth,Errorf,Errorln,Exit,ExitDepth,Exitf,Exitln,Fatal,FatalDepth,Fatalf,Fatalln,Info,InfoDepth,Infof,Infoln,Warning,WarningDepth,Warningf,Warningln
//go:generate depstubber -vendor k8s.io/klog "" Error,ErrorDepth,Errorf,Errorln,Exit,ExitDepth,Exitf,Exitln,Fatal,FatalDepth,Fatalf,Fatalln,Info,InfoDepth,Infof,Infoln,Warning,WarningDepth,Warningf,Warningln
package main
import (
"github.com/golang/glog"
"k8s.io/klog"
)
func
|
() {
glog.Error(text) // $logger=text
glog.ErrorDepth(0, text) // $f-:logger=text
glog.Errorf(fmt, text) // $logger=fmt $logger=text
glog.Errorln(text) // $logger=text
glog.Exit(text) // $logger=text
glog.ExitDepth(0, text) // $f-:logger=text
glog.Exitf(fmt, text) // $logger=fmt $logger=text
glog.Exitln(text) // $logger=text
glog.Fatal(text) // $logger=text
glog.FatalDepth(0, text) // $f-:logger=text
glog.Fatalf(fmt, text) // $logger=fmt $logger=text
glog.Fatalln(text) // $logger=text
glog.Info(text) // $logger=text
glog.InfoDepth(0, text) // $f-:logger=text
glog.Infof(fmt, text) // $logger=fmt $logger=text
glog.Infoln(text) // $logger=text
glog.Warning(text) // $logger=text
glog.WarningDepth(0, text) // $f-:logger=text
glog.Warningf(fmt, text) // $logger=fmt $logger=text
glog.Warningln(text) // $logger=text
klog.Error(text) // $logger=text
klog.ErrorDepth(0, text) // $f-:logger=text
klog.Errorf(fmt, text) // $logger=fmt $logger=text
klog.Errorln(text) // $logger=text
klog.Exit(text) // $logger=text
klog.ExitDepth(0, text) // $f-:logger=text
klog.Exitf(fmt, text) // $logger=fmt $logger=text
klog.Exitln(text) // $logger=text
klog.Fatal(text) // $logger=text
klog.FatalDepth(0, text) // $f-:logger=text
klog.Fatalf(fmt, text) // $logger=fmt $logger=text
klog.Fatalln(text) // $logger=text
klog.Info(text) // $logger=text
klog.InfoDepth(0, text) // $f-:logger=text
klog.Infof(fmt, text) // $logger=fmt $logger=text
klog.Infoln(text) // $logger=text
klog.Warning(text) // $logger=text
klog.WarningDepth(0, text) // $f-:logger=text
klog.Warningf(fmt, text) // $logger=fmt $logger=text
klog.Warningln(text) // $logger=text
}
|
glogTest
|
rayon.rs
|
//! Parallelism for Units collection.
use super::{cmp, cmp_by2, Container, FxIndexMap, Units};
use crate::{distance::Distance, geometry::Point2, ids::UnitTypeId, unit::Unit};
use indexmap::map::rayon::{ParIter, ParIterMut, ParKeys, ParValues, ParValuesMut};
use rayon::{iter::plumbing::*, prelude::*};
use std::{borrow::Borrow, cmp::Ordering, iter::Sum};
#[inline]
fn cmp_by<U, T, F>(f: F) -> impl Fn(&&U, &&U) -> Ordering
where
T: PartialOrd,
F: Fn(&U) -> T + Send + Sync,
{
move |a, b| f(a).partial_cmp(&f(b)).unwrap()
}
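// Note: `partial_cmp(..).unwrap()` panics on incomparable keys (e.g. NaN),
// so callers are expected to supply totally ordered values.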
impl Units {
/// Returns parallel iterator over the units of the collection.
#[inline]
pub fn par_iter(&self) -> ParValues<u64, Unit> {
self.0.par_values()
}
/// Returns mutable parallel iterator over the units of the collection.
#[inline]
pub fn par_iter_mut(&mut self) -> ParValuesMut<u64, Unit> {
self.0.par_values_mut()
}
/// Returns parallel iterator over (tag, unit) pairs of the collection.
#[inline]
pub fn par_pairs(&self) -> ParIter<u64, Unit> {
self.0.par_iter()
}
/// Returns mutable parallel iterator over (tag, unit) pairs of the collection.
#[inline]
pub fn par_pairs_mut(&mut self) -> ParIterMut<u64, Unit> {
self.0.par_iter_mut()
}
/// Returns parallel iterator over unit tags of the collection.
#[inline]
pub fn par_tags(&self) -> ParKeys<u64, Unit> {
self.0.par_keys()
}
/// Leaves only units that match the given predicate and makes a new collection of them.
///
/// Warning: This method will clone units in order to create a new collection
/// and is evaluated eagerly. When applicable, prefer using [`filter`]
/// on the iterator over units, since it's lazily evaluated and doesn't do any cloning operations.
///
/// [`filter`]: Iterator::filter
pub fn filter<F>(&self, f: F) -> Self
where
F: Fn(&&Unit) -> bool + Sync + Send,
{
Self(self.par_iter().filter(f).map(|u| (u.tag, u.clone())).collect())
}
/// Leaves only units of given types and makes a new collection of them.
///
/// Warning: This method will clone units in order to create a new collection
/// and is evaluated eagerly. When applicable, prefer using [`of_types`]
/// on the iterator over units, since it's lazily evaluated and doesn't do any cloning operations.
///
/// [`of_types`]: super::UnitsIterator::of_types
pub fn of_types<T: Container<UnitTypeId> + Sync>(&self, types: &T) -> Self {
self.filter(|u| types.contains(&u.type_id))
}
/// Excludes units of given types and makes a new collection of remaining units.
///
/// Warning: This method will clone units in order to create a new collection
/// and is evaluated eagerly. When applicable, prefer using [`exclude_types`]
/// on the iterator over units, since it's lazily evaluated and doesn't do any cloning operations.
///
/// [`exclude_types`]: super::UnitsIterator::exclude_types
pub fn exclude_types<T: Container<UnitTypeId> + Sync>(&self, types: &T) -> Self {
self.filter(|u| !types.contains(&u.type_id))
}
/// Leaves only units closer than the given distance to target and makes a new collection of them.
///
/// Warning: This method will clone units in order to create a new collection
/// and is evaluated eagerly. When applicable, prefer using [`closer`]
/// on the iterator over units, since it's lazily evaluated and doesn't do any cloning operations.
///
/// [`closer`]: crate::distance::DistanceIterator::closer
pub fn closer<P: Into<Point2> + Copy + Sync>(&self, distance: f32, target: P) -> Self {
self.filter(|u| u.is_closer(distance, target))
}
/// Leaves only units further than the given distance to target and makes a new collection of them.
///
/// Warning: This method will clone units in order to create a new collection
/// and is evaluated eagerly. When applicable, prefer using [`further`]
/// on the iterator over units, since it's lazily evaluated and doesn't do any cloning operations.
///
/// [`further`]: crate::distance::DistanceIterator::further
pub fn further<P: Into<Point2> + Copy + Sync>(&self, distance: f32, target: P) -> Self {
self.filter(|u| u.is_further(distance, target))
}
/// Returns closest from the collection unit to given target.
pub fn closest<P: Into<Point2> + Copy + Sync>(&self, target: P) -> Option<&Unit> {
self.min(|u| u.distance_squared(target))
}
/// Returns furthest from the collection unit to given target.
pub fn furthest<P: Into<Point2> + Copy + Sync>(&self, target: P) -> Option<&Unit> {
self.max(|u| u.distance_squared(target))
}
/// Returns distance from closest unit in the collection to given target.
pub fn closest_distance<P: Into<Point2> + Copy + Sync>(&self, target: P) -> Option<f32> {
self.min_value(|u| u.distance_squared(target))
.map(|dist| dist.sqrt())
}
/// Returns distance from furthest unit in the collection to given target.
pub fn furthest_distance<P: Into<Point2> + Copy + Sync>(&self, target: P) -> Option<f32> {
self.max_value(|u| u.distance_squared(target))
.map(|dist| dist.sqrt())
}
/// Returns squared distance from closest unit in the collection to given target.
pub fn closest_distance_squared<P: Into<Point2> + Copy + Sync>(&self, target: P) -> Option<f32> {
self.min_value(|u| u.distance_squared(target))
}
/// Returns squared distance from furthest unit in the collection to given target.
pub fn furthest_distance_squared<P: Into<Point2> + Copy + Sync>(&self, target: P) -> Option<f32> {
self.max_value(|u| u.distance_squared(target))
}
/// Returns sum of given unit values.
pub fn sum<T, F>(&self, f: F) -> T
where
T: Sum + Send,
F: Fn(&Unit) -> T + Send + Sync,
{
self.par_iter().map(f).sum::<T>()
}
/// Returns unit with minimum given predicate.
pub fn min<T, F>(&self, f: F) -> Option<&Unit>
where
T: PartialOrd,
F: Fn(&Unit) -> T + Send + Sync,
{
self.par_iter().min_by(cmp_by(f))
}
/// Returns minimum of given unit values.
pub fn min_value<T, F>(&self, f: F) -> Option<T>
where
T: PartialOrd + Send,
F: Fn(&Unit) -> T + Send + Sync,
{
self.par_iter().map(f).min_by(cmp)
}
/// Returns unit with maximum given predicate.
pub fn max<T, F>(&self, f: F) -> Option<&Unit>
where
T: PartialOrd,
F: Fn(&Unit) -> T + Sync + Send,
{
self.par_iter().max_by(cmp_by(f))
}
/// Returns maximum of given unit values.
pub fn max_value<T, F>(&self, f: F) -> Option<T>
where
T: PartialOrd + Send,
F: Fn(&Unit) -> T + Sync + Send,
{
self.par_iter().map(f).max_by(cmp)
}
/// Parallelly sorts the collection by given function.
pub fn par_sort<T, F>(&mut self, f: F)
where
T: PartialOrd,
F: Fn(&Unit) -> T + Sync + Send,
{
self.0.par_sort_by(cmp_by2(f));
}
/// Makes new collection parallelly sorted by given function.
/// Leaves original collection untouched.
pub fn par_sorted<T, F>(&self, f: F) -> Self
where
T: PartialOrd,
F: Fn(&Unit) -> T + Sync + Send,
{
let mut sorted = self.clone();
sorted.0.par_sort_by(cmp_by2(f));
sorted
}
}
impl IntoParallelIterator for Units {
type Item = Unit;
type Iter = IntoParUnits;
#[inline]
fn into_par_iter(self) -> Self::Iter {
IntoParUnits(self.0)
}
}
impl<'a> IntoParallelIterator for &'a Units {
type Item = &'a Unit;
type Iter = ParValues<'a, u64, Unit>;
#[inline]
fn into_par_iter(self) -> Self::Iter {
self.0.par_values()
}
}
impl<'a> IntoParallelIterator for &'a mut Units {
type Item = &'a mut Unit;
type Iter = ParValuesMut<'a, u64, Unit>;
#[inline]
fn into_par_iter(self) -> Self::Iter {
self.0.par_values_mut()
}
}
impl ParallelExtend<Unit> for Units {
#[inline]
fn par_extend<T: IntoParallelIterator<Item = Unit>>(&mut self, par_iter: T) {
self.0.par_extend(par_iter.into_par_iter().map(|u| (u.tag, u)));
}
}
impl ParallelExtend<(u64, Unit)> for Units {
#[inline]
fn par_extend<T: IntoParallelIterator<Item = (u64, Unit)>>(&mut self, par_iter: T) {
self.0.par_extend(par_iter);
}
}
impl FromParallelIterator<Unit> for Units {
#[inline]
fn from_par_iter<I: IntoParallelIterator<Item = Unit>>(par_iter: I) -> Self {
Self(par_iter.into_par_iter().map(|u| (u.tag, u)).collect())
}
}
impl FromParallelIterator<(u64, Unit)> for Units {
#[inline]
fn from_par_iter<I: IntoParallelIterator<Item = (u64, Unit)>>(par_iter: I) -> Self {
Self(par_iter.into_par_iter().collect())
}
}
/// Helper trait for parallel iterators over units.
pub trait ParUnitsIterator: ParallelIterator
where
Self::Item: Borrow<Unit>,
{
/// Searches for unit with given tag and returns it if found.
fn find_tag(self, tag: u64) -> Option<Self::Item> {
self.find_any(|u| u.borrow().tag == tag)
}
/// Leaves only units with given tags.
fn find_tags<T: Container<u64>>(self, tags: &T) -> FindTags<Self, T> {
FindTags::new(self, tags)
}
/// Leaves only units of given type.
fn of_type(self, unit_type: UnitTypeId) -> OfType<Self> {
OfType::new(self, unit_type)
}
/// Excludes units of given type.
fn exclude_type(self, unit_type: UnitTypeId) -> ExcludeType<Self> {
ExcludeType::new(self, unit_type)
}
/// Leaves only units of given types.
fn of_types<T: Container<UnitTypeId>>(self, types: &T) -> OfTypes<Self, T> {
OfTypes::new(self, types)
}
/// Excludes units of given types.
fn exclude_types<T: Container<UnitTypeId>>(self, types: &T) -> ExcludeTypes<Self, T> {
ExcludeTypes::new(self, types)
}
/// Leaves only non-flying units.
fn ground(self) -> Ground<Self> {
Ground::new(self)
}
/// Leaves only flying units.
fn flying(self) -> Flying<Self> {
Flying::new(self)
}
/// Leaves only ready structures.
fn ready(self) -> Ready<Self> {
Ready::new(self)
}
/// Leaves only structures in-progress.
fn not_ready(self) -> NotReady<Self> {
NotReady::new(self)
}
/// Leaves only units with no orders.
fn idle(self) -> Idle<Self> {
Idle::new(self)
}
/// Leaves only units with no orders or that almost finished their orders.
fn almost_idle(self) -> AlmostIdle<Self> {
AlmostIdle::new(self)
}
/// Leaves only units with no orders.
/// Unlike [`idle`](Self::idle) this takes reactor on terran buildings into account.
fn unused(self) -> Unused<Self> {
Unused::new(self)
}
/// Leaves only units with no orders or that almost finished their orders.
/// Unlike [`almost_idle`](Self::almost_idle) this takes reactor on terran buildings into account.
fn almost_unused(self) -> AlmostUnused<Self> {
AlmostUnused::new(self)
}
/// Leaves only units visible on current step.
fn visible(self) -> Visible<Self> {
Visible::new(self)
}
/// Leaves only units in attack range of given unit.
fn in_range_of(self, unit: &Unit, gap: f32) -> InRangeOf<Self> {
InRangeOf::new(self, unit, gap)
}
/// Leaves only units that are close enough to attack given unit.
fn in_range(self, unit: &Unit, gap: f32) -> InRange<Self> {
InRange::new(self, unit, gap)
}
/// Leaves only units in attack range of given unit.
/// Unlike [`in_range_of`](Self::in_range_of) this takes range upgrades into account.
fn in_real_range_of(self, unit: &Unit, gap: f32) -> InRealRangeOf<Self> {
InRealRangeOf::new(self, unit, gap)
}
/// Leaves only units that are close enough to attack given unit.
/// Unlike [`in_range`](Self::in_range) this takes range upgrades into account.
fn in_real_range(self, unit: &Unit, gap: f32) -> InRealRange<Self> {
InRealRange::new(self, unit, gap)
}
}
impl<I> ParUnitsIterator for I
where
I: ParallelIterator,
I::Item: Borrow<Unit>,
{
}
/// Owned parallel iterator over Units.
pub struct IntoParUnits(FxIndexMap<u64, Unit>);
impl ParallelIterator for IntoParUnits {
type Item = Unit;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.0.into_par_iter().map(|x| x.1).drive_unindexed(consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.0.len())
}
}
impl IndexedParallelIterator for IntoParUnits {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
self.0.into_par_iter().map(|x| x.1).drive(consumer)
}
fn len(&self) -> usize {
self.0.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
self.0.into_par_iter().map(|x| x.1).with_producer(callback)
}
}
// Macros to generate parallel iterator implementation here
macro_rules! iterator_methods {
() => {
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let pred = self.predicate();
self.iter
.drive_unindexed(FilterConsumer::new(consumer, &pred))
}
};
}
macro_rules! impl_simple_iterator {
($name:ident $(<$a:lifetime>)?) => {
impl<$($a,)? I> ParallelIterator for $name<$($a,)? I>
where
I: ParallelIterator,
I::Item: Borrow<Unit>,
{
type Item = I::Item;
iterator_methods!();
}
};
}
macro_rules! make_simple_iterator {
($(#[$attr:meta])* $name:ident, $pred:expr) => {
$(#[$attr])*
#[derive(Clone)]
pub struct $name<I> {
iter: I,
}
impl<I> $name<I> {
pub(super) fn new(iter: I) -> Self {
Self { iter }
}
fn predicate(&self) -> impl Fn(&Unit) -> bool {
$pred
}
}
impl_simple_iterator!($name);
};
}
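// For example, `make_simple_iterator!(Ground, |u| !u.is_flying)` below expands
// into a `Ground<I>` adaptor whose predicate keeps only non-flying units.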
// Consumer implementation
struct FilterConsumer<'p, C, P> {
base: C,
filter_op: &'p P,
}
impl<'p, C, P> FilterConsumer<'p, C, P> {
fn new(base: C, filter_op: &'p P) -> Self {
FilterConsumer { base, filter_op }
}
}
impl<'p, T, C, P: 'p> Consumer<T> for FilterConsumer<'p, C, P>
where
C: Consumer<T>,
P: Fn(&Unit) -> bool + Sync,
T: Borrow<Unit>,
{
type Folder = FilterFolder<'p, C::Folder, P>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FilterConsumer::new(left, self.filter_op),
FilterConsumer::new(right, self.filter_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FilterFolder {
base: self.base.into_folder(),
filter_op: self.filter_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'p, T, C, P: 'p> UnindexedConsumer<T> for FilterConsumer<'p, C, P>
where
C: UnindexedConsumer<T>,
P: Fn(&Unit) -> bool + Sync,
T: Borrow<Unit>,
{
fn split_off_left(&self) -> Self {
FilterConsumer::new(self.base.split_off_left(), &self.filter_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FilterFolder<'p, C, P> {
base: C,
filter_op: &'p P,
}
impl<'p, C, P, T> Folder<T> for FilterFolder<'p, C, P>
where
C: Folder<T>,
P: Fn(&Unit) -> bool + 'p,
T: Borrow<Unit>,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let filter_op = self.filter_op;
if filter_op(item.borrow()) {
let base = self.base.consume(item);
FilterFolder { base, filter_op }
} else
|
}
fn complete(self) -> Self::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
// Parallel Iterator adaptors here
/// An iterator that filters units with given tags.
#[derive(Clone)]
pub struct FindTags<'a, I, T> {
iter: I,
tags: &'a T,
}
impl<'a, I, T: Container<u64>> FindTags<'a, I, T> {
pub(super) fn new(iter: I, tags: &'a T) -> Self {
Self { iter, tags }
}
fn predicate(&self) -> impl Fn(&Unit) -> bool + 'a {
let tags = self.tags;
move |u| tags.contains(&u.tag)
}
}
impl<'a, I, T> ParallelIterator for FindTags<'a, I, T>
where
I: ParallelIterator,
I::Item: Borrow<Unit>,
T: Container<u64> + Sync,
{
type Item = I::Item;
iterator_methods!();
}
/// An iterator that filters units of given type.
#[derive(Clone)]
pub struct OfType<I> {
iter: I,
unit_type: UnitTypeId,
}
impl<I> OfType<I> {
pub(super) fn new(iter: I, unit_type: UnitTypeId) -> Self {
Self { iter, unit_type }
}
fn predicate(&self) -> impl Fn(&Unit) -> bool {
let unit_type = self.unit_type;
move |u| u.type_id == unit_type
}
}
impl_simple_iterator!(OfType);
/// An iterator that filters out units of given type.
#[derive(Clone)]
pub struct ExcludeType<I> {
iter: I,
unit_type: UnitTypeId,
}
impl<I> ExcludeType<I> {
pub(super) fn new(iter: I, unit_type: UnitTypeId) -> Self {
Self { iter, unit_type }
}
fn predicate(&self) -> impl Fn(&Unit) -> bool {
let unit_type = self.unit_type;
move |u| u.type_id != unit_type
}
}
impl_simple_iterator!(ExcludeType);
/// An iterator that filters units of given types.
#[derive(Clone)]
pub struct OfTypes<'a, I, T> {
iter: I,
types: &'a T,
}
impl<'a, I, T: Container<UnitTypeId>> OfTypes<'a, I, T> {
pub(super) fn new(iter: I, types: &'a T) -> Self {
Self { iter, types }
}
fn predicate(&self) -> impl Fn(&Unit) -> bool + 'a {
let types = self.types;
move |u| types.contains(&u.type_id)
}
}
impl<'a, I, T> ParallelIterator for OfTypes<'a, I, T>
where
I: ParallelIterator,
I::Item: Borrow<Unit>,
T: Container<UnitTypeId> + Sync,
{
type Item = I::Item;
iterator_methods!();
}
/// An iterator that filters out units of given types.
#[derive(Clone)]
pub struct ExcludeTypes<'a, I, T> {
iter: I,
types: &'a T,
}
impl<'a, I, T: Container<UnitTypeId>> ExcludeTypes<'a, I, T> {
pub(super) fn new(iter: I, types: &'a T) -> Self {
Self { iter, types }
}
fn predicate(&self) -> impl Fn(&Unit) -> bool + 'a {
let types = self.types;
move |u| !types.contains(&u.type_id)
}
}
impl<'a, I, T> ParallelIterator for ExcludeTypes<'a, I, T>
where
I: ParallelIterator,
I::Item: Borrow<Unit>,
T: Container<UnitTypeId> + Sync,
{
type Item = I::Item;
iterator_methods!();
}
make_simple_iterator!(
/// An iterator that filters ground units.
Ground,
|u| !u.is_flying
);
make_simple_iterator!(
/// An iterator that filters flying units.
Flying,
|u| u.is_flying
);
make_simple_iterator!(
/// An iterator that filters ready units and structures.
Ready,
|u| u.is_ready()
);
make_simple_iterator!(
/// An iterator that filters units and structures in-progress.
NotReady,
|u| !u.is_ready()
);
make_simple_iterator!(
/// An iterator that filters units with no orders.
Idle,
|u| u.is_idle()
);
make_simple_iterator!(
/// An iterator that filters units with no orders or almost finished orders.
AlmostIdle,
|u| u.is_almost_idle()
);
make_simple_iterator!(
/// An iterator that filters units with no orders (this also handles buildings with reactor).
Unused,
|u| u.is_unused()
);
make_simple_iterator!(
/// An iterator that filters units with no orders or almost finished orders
/// (this also handles buildings with reactor).
AlmostUnused,
|u| u.is_almost_unused()
);
make_simple_iterator!(
/// An iterator that filters units visible on the current step.
Visible,
|u| u.is_visible()
);
/// An iterator that filters units in attack range of given unit.
#[derive(Clone)]
pub struct InRangeOf<'a, I> {
iter: I,
unit: &'a Unit,
gap: f32,
}
impl<'a, I> InRangeOf<'a, I> {
pub(super) fn new(iter: I, unit: &'a Unit, gap: f32) -> Self {
Self { iter, unit, gap }
}
fn predicate(&self) -> impl Fn(&Unit) -> bool + 'a {
let unit = self.unit;
let gap = self.gap;
move |u| unit.in_range(u, gap)
}
}
impl_simple_iterator!(InRangeOf<'a>);
/// An iterator that filters units close enough to attack given unit.
#[derive(Clone)]
pub struct InRange<'a, I> {
iter: I,
unit: &'a Unit,
gap: f32,
}
impl<'a, I> InRange<'a, I> {
pub(super) fn new(iter: I, unit: &'a Unit, gap: f32) -> Self {
Self { iter, unit, gap }
}
fn predicate(&self) -> impl Fn(&Unit) -> bool + 'a {
let unit = self.unit;
let gap = self.gap;
move |u| u.in_range(unit, gap)
}
}
impl_simple_iterator!(InRange<'a>);
/// An iterator that filters units in attack range of given unit (this also handles range upgrades).
#[derive(Clone)]
pub struct InRealRangeOf<'a, I> {
iter: I,
unit: &'a Unit,
gap: f32,
}
impl<'a, I> InRealRangeOf<'a, I> {
pub(super) fn new(iter: I, unit: &'a Unit, gap: f32) -> Self {
Self { iter, unit, gap }
}
fn predicate(&self) -> impl Fn(&Unit) -> bool + 'a {
let unit = self.unit;
let gap = self.gap;
move |u| unit.in_real_range(u, gap)
}
}
impl_simple_iterator!(InRealRangeOf<'a>);
/// An iterator that filters units close enough to attack given unit (this also handles range upgrades).
#[derive(Clone)]
pub struct InRealRange<'a, I> {
iter: I,
unit: &'a Unit,
gap: f32,
}
impl<'a, I> InRealRange<'a, I> {
pub(super) fn new(iter: I, unit: &'a Unit, gap: f32) -> Self {
Self { iter, unit, gap }
}
fn predicate(&self) -> impl Fn(&Unit) -> bool + 'a {
let unit = self.unit;
let gap = self.gap;
move |u| u.in_real_range(unit, gap)
}
}
impl_simple_iterator!(InRealRange<'a>);
|
{
self
}
|
main.rs
|
use rustc_hash::FxHashMap;
use std::fs;
use std::io::BufWriter;
use std::path::PathBuf;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use analyzeme::{ProfilingData, Timestamp};
use serde::ser::SerializeSeq;
use serde::{Serialize, Serializer};
use serde_json::json;
use std::cmp;
use structopt::StructOpt;
fn as_micros<S: Serializer>(d: &Duration, s: S) -> Result<S::Ok, S::Error> {
let v = (d.as_secs() * 1_000_000) + (d.subsec_nanos() as u64 / 1_000);
s.serialize_u64(v)
}
#[derive(Clone, Copy, Eq, PartialEq, Serialize)]
enum EventType {
#[serde(rename = "X")]
Complete,
}
#[derive(Serialize)]
struct Event {
name: String,
#[serde(rename = "cat")]
category: String,
#[serde(rename = "ph")]
event_type: EventType,
#[serde(rename = "ts", serialize_with = "as_micros")]
timestamp: Duration,
#[serde(rename = "dur", serialize_with = "as_micros")]
duration: Duration,
#[serde(rename = "pid")]
process_id: u32,
#[serde(rename = "tid")]
thread_id: u32,
args: Option<FxHashMap<String, String>>,
}
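// Events are serialized in the Chrome Trace Event format ("X" = complete
// event, timestamps and durations in microseconds), so the resulting
// chrome_profiler.json can be loaded into chrome://tracing.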
#[derive(StructOpt, Debug)]
struct Opt {
#[structopt(required_unless = "dir")]
file_prefix: Vec<PathBuf>,
/// all event trace files in dir will be merged to one chrome_profiler.json file
#[structopt(long = "dir")]
dir: Option<PathBuf>,
/// collapse threads without overlapping events
#[structopt(long = "collapse-threads")]
collapse_threads: bool,
/// filter out events with shorter duration (in microseconds)
#[structopt(long = "minimum-duration")]
minimum_duration: Option<u128>,
}
// generate mapping from thread_id to collapsed thread_id or an empty map
fn generate_thread_to_collapsed_thread_mapping(
opt: &Opt,
data: &ProfilingData,
) -> FxHashMap<u32, u32> {
let mut thread_to_collapsed_thread: FxHashMap<u32, u32> = FxHashMap::default();
if opt.collapse_threads {
// collect start and end times for all threads
let mut thread_start_and_end: FxHashMap<u32, (SystemTime, SystemTime)> =
FxHashMap::default();
for event in data.iter() {
thread_start_and_end
.entry(event.thread_id)
.and_modify(|(thread_start, thread_end)| {
let (event_min, event_max) = timestamp_to_min_max(event.timestamp);
*thread_start = cmp::min(*thread_start, event_min);
*thread_end = cmp::max(*thread_end, event_max);
})
.or_insert_with(|| timestamp_to_min_max(event.timestamp));
}
// collect the threads in order of the end time
let mut end_and_thread = thread_start_and_end
.iter()
.map(|(&thread_id, &(_start, end))| (end, thread_id))
.collect::<Vec<_>>();
end_and_thread.sort_unstable_by_key(|&(end, _thread_id)| end);
let mut next_end_iter = end_and_thread.iter().peekable();
// collect the threads in order of the start time
let mut start_and_thread = thread_start_and_end
.iter()
.map(|(&thread_id, &(start, _end))| (start, thread_id))
.collect::<Vec<_>>();
start_and_thread.sort_unstable_by_key(|&(start, _thread_id)| start);
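// Sweep threads in start order: a thread that starts after the earliest
// remaining end time can reuse that finished thread's collapsed id;
// otherwise it is assigned a fresh id.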
let mut current_thread_id = 0; // use new thread_ids to avoid strange gaps in the numbers
for &(start, thread_id) in start_and_thread.iter() {
// safe to unwrap because end_and_thread and start_and_thread have the same length
let (next_end, next_thread_id) = next_end_iter.peek().unwrap();
if start > *next_end {
next_end_iter.next();
// need to lookup the thread_id due to new and collapsed threads
let mapped_thread_id = *thread_to_collapsed_thread
.get(&next_thread_id)
.unwrap_or(&next_thread_id);
thread_to_collapsed_thread.insert(thread_id, mapped_thread_id);
} else {
thread_to_collapsed_thread.insert(thread_id, current_thread_id);
current_thread_id += 1;
}
}
}
thread_to_collapsed_thread
}
fn get_args(full_event: &analyzeme::Event) -> Option<FxHashMap<String, String>>
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
let opt = Opt::from_args();
let chrome_file = BufWriter::new(fs::File::create("chrome_profiler.json")?);
let mut serializer = serde_json::Serializer::new(chrome_file);
let mut seq = serializer.serialize_seq(None)?;
let dir_paths = file_prefixes_in_dir(&opt)?;
for file_prefix in opt.file_prefix.iter().chain(dir_paths.iter()) {
let data = ProfilingData::new(&file_prefix)?;
let thread_to_collapsed_thread = generate_thread_to_collapsed_thread_mapping(&opt, &data);
// Chrome does not seem to like how many QueryCacheHit events we generate
// only handle Interval events for now
for event in data.iter().filter(|e| !e.timestamp.is_instant()) {
let duration = event.duration().unwrap();
if let Some(minimum_duration) = opt.minimum_duration {
if duration.as_micros() < minimum_duration {
continue;
}
}
let full_event = event.to_event();
let crox_event = Event {
name: full_event.label.clone().into_owned(),
category: full_event.event_kind.clone().into_owned(),
event_type: EventType::Complete,
timestamp: event.timestamp.start().duration_since(UNIX_EPOCH).unwrap(),
duration,
process_id: data.metadata.process_id,
thread_id: *thread_to_collapsed_thread
.get(&event.thread_id)
.unwrap_or(&event.thread_id),
args: get_args(&full_event),
};
seq.serialize_element(&crox_event)?;
}
// add crate name for the process_id
let index_of_crate_name = data
.metadata
.cmd
.find(" --crate-name ")
.map(|index| index + 14);
if let Some(index) = index_of_crate_name {
let (_, last) = data.metadata.cmd.split_at(index);
let (crate_name, _) = last.split_at(last.find(" ").unwrap_or(last.len()));
let process_name = json!({
"name": "process_name",
"ph" : "M",
"ts" : 0,
"tid" : 0,
"cat" : "",
"pid" : data.metadata.process_id,
"args": {
"name" : crate_name
}
});
seq.serialize_element(&process_name)?;
}
// sort the processes after start time
let process_name = json!({
"name": "process_sort_index",
"ph" : "M",
"ts" : 0,
"tid" : 0,
"cat" : "",
"pid" : data.metadata.process_id,
"args": {
"sort_index" : data.metadata.start_time.duration_since(UNIX_EPOCH).unwrap().as_micros() as u64
}
});
seq.serialize_element(&process_name)?;
}
seq.end()?;
Ok(())
}
fn file_prefixes_in_dir(opt: &Opt) -> Result<Vec<PathBuf>, std::io::Error> {
let mut result = Vec::new();
if let Some(dir_path) = &opt.dir {
for entry in fs::read_dir(dir_path)? {
let entry = entry?;
let path = entry.path();
if path.extension().filter(|e| *e == "events").is_some() {
result.push(path)
}
}
}
Ok(result)
}
fn timestamp_to_min_max(timestamp: Timestamp) -> (SystemTime, SystemTime) {
match timestamp {
Timestamp::Instant(t) => (t, t),
Timestamp::Interval { start, end } => {
// Usually end should always be greater than start, but let's not
// choke on invalid data here.
(cmp::min(start, end), cmp::max(start, end))
}
}
}
|
{
if !full_event.additional_data.is_empty() {
Some(
full_event
.additional_data
.iter()
.enumerate()
.map(|(i, arg)| (format!("arg{}", i).to_string(), arg.to_string()))
.collect(),
)
} else {
None
}
}
|
test_simple_storage.py
|
from brownie import SimpleStorage, accounts
def test_deploy():
# Arrange
account = accounts[0]
# Act
simple_storage = SimpleStorage.deploy({"from": account})
starting_value = simple_storage.retrieve()
expected = 0
# Assert
assert starting_value == expected
def test_updating_storage():
# Arrange
|
account = accounts[0]
simple_storage = SimpleStorage.deploy({"from": account})
# Act
store_value = 100
transaction = simple_storage.store(store_value, {"from": account})
updated_value = simple_storage.retrieve()
# Assert
assert updated_value == store_value
|
|
upload.go
|
package cmd
import (
"context"
"errors"
"fmt"
"io/ioutil"
stencilv1beta1 "github.com/odpf/stencil/server/odpf/stencil/v1beta1"
"github.com/spf13/cobra"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
)
// UploadCmd creates a new cobra command for upload
func
|
() *cobra.Command {
var host, filePath string
var req stencilv1beta1.CreateSchemaRequest
var format, compatibility string
cmd := &cobra.Command{
Use: "upload",
Short: "Upload filedescriptorset file",
Args: cobra.NoArgs,
Annotations: map[string]string{
"group:core": "true",
},
RunE: func(cmd *cobra.Command, args []string) error {
fileData, err := ioutil.ReadFile(filePath)
if err != nil {
return err
}
req.Data = fileData
conn, err := grpc.Dial(host, grpc.WithInsecure())
if err != nil {
return err
}
defer conn.Close()
client := stencilv1beta1.NewStencilServiceClient(conn)
req.Compatibility = stencilv1beta1.Schema_Compatibility(stencilv1beta1.Schema_Compatibility_value[compatibility])
req.Format = stencilv1beta1.Schema_Format(stencilv1beta1.Schema_Format_value[format])
_, err = client.CreateSchema(context.Background(), &req)
if err != nil {
errStatus := status.Convert(err)
return errors.New(errStatus.Message())
}
fmt.Println("success")
return nil
},
}
cmd.Flags().StringVar(&host, "host", "", "stencil host address eg: localhost:8000")
cmd.MarkFlagRequired("host")
cmd.Flags().StringVar(&req.NamespaceId, "namespace", "", "provide namespace/group or entity name")
cmd.MarkFlagRequired("namespace")
cmd.Flags().StringVar(&req.SchemaId, "name", "", "provide proto repo name")
cmd.MarkFlagRequired("name")
cmd.Flags().StringVar(&filePath, "file", "", "provide path to fully contained file descriptor set file")
cmd.MarkFlagRequired("file")
cmd.Flags().StringVar(&format, "format", "", "schema format. Valid values are FORMAT_PROTOBUF,FORMAT_AVRO,FORMAT_JSON")
cmd.Flags().StringVar(&compatibility, "compatibility", "COMPATIBILITY_FULL", "schema compatibility. Valid values are COMPATIBILITY_FULL")
return cmd
}
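// Example invocation (host, namespace, name and file are placeholders):
//   stencil upload --host localhost:8000 --namespace mygroup --name myproto \
//     --file ./file_descriptor_set.pb --format FORMAT_PROTOBUF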
|
UploadCmd
|
rewrite-if-into-ternary.js
|
let result;
if (a + b < 4) {
result = 'Below';
} else {
result = 'Over';
}
let result = (a + b < 4) ? 'Below' : 'Over';
|
//another task
let message;
if (login == 'Employee') {
message = 'Hello';
} else if (login == 'Director') {
message = 'Greetings';
} else if (login == '') {
message = 'No login';
} else {
message = '';
}
let message = login == 'Employee'
? 'Hello'
: login == 'Director'
? 'Greetings'
: login == ''
? 'No login'
: ''
|
|
repo.rs
|
use std::path::{Path, PathBuf};
use crate::fs;
use crate::git;
use crate::ignore::Ignorer;
#[derive(Debug)]
pub struct RepositoryFilter {
pub dirty: bool,
pub unpushed: bool,
pub ignorer: Ignorer,
}
#[derive(Debug)]
pub struct Repository {
path: PathBuf,
}
impl Repository {
pub fn run(&self, args: &[String]) -> bool {
git::run_in_context(&self.path, args).unwrap_or(false)
}
pub fn get_path(&self, base: &Path, absolute: bool) -> &Path {
if absolute {
&self.path
} else {
self.rel_path(base)
}
}
fn
|
(&self, base: &Path) -> &Path {
let name = self.path.strip_prefix(base).unwrap();
if name.components().next().is_none() {
Path::new(".")
} else {
name
}
}
}
pub fn list_from_vec(filter: &RepositoryFilter, base_dir: &Path, paths: Vec<PathBuf>) -> Vec<Repository> {
let paths = paths
.into_iter()
.map(|path| {
if path.is_absolute() {
path
} else {
base_dir.join(path)
}
})
.collect();
mkrepos(filter, paths)
}
pub fn list_from_fs(filter: &RepositoryFilter, base_dir: &Path) -> Vec<Repository> {
mkrepos(filter, fs::list_repos(base_dir).unwrap())
}
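// Build repositories in parallel, dropping paths that are ignored, are not
// actual git repos, or fail the requested dirty/unpushed filters.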
fn mkrepos(filter: &RepositoryFilter, paths: Vec<PathBuf>) -> Vec<Repository> {
use rayon::prelude::*;
paths
.into_par_iter()
.filter_map(|path| {
if filter.ignorer.is_match(&path)
|| !git::is_actual(&path)
|| (filter.dirty && !git::is_dirty(&path))
|| (filter.unpushed && !git::is_unpushed(&path))
{
None
} else {
Some(Repository { path })
}
})
.collect()
}
|
rel_path
|
handshake.go
|
package dist
import (
"bytes"
"crypto/md5"
"encoding/binary"
"fmt"
"io"
"math/rand"
"time"
"github.com/ergo-services/ergo/lib"
"github.com/ergo-services/ergo/node"
)
const (
DistHandshakeVersion5 node.HandshakeVersion = 5
DistHandshakeVersion6 node.HandshakeVersion = 6
DefaultDistHandshakeVersion = DistHandshakeVersion5
// distribution flags are defined here https://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-flags
flagPublished nodeFlagId = 0x1
flagAtomCache = 0x2
flagExtendedReferences = 0x4
flagDistMonitor = 0x8
flagFunTags = 0x10
flagDistMonitorName = 0x20
flagHiddenAtomCache = 0x40
flagNewFunTags = 0x80
flagExtendedPidsPorts = 0x100
flagExportPtrTag = 0x200
flagBitBinaries = 0x400
flagNewFloats = 0x800
flagUnicodeIO = 0x1000
flagDistHdrAtomCache = 0x2000
flagSmallAtomTags = 0x4000
flagUTF8Atoms = 0x10000
flagMapTag = 0x20000
flagBigCreation = 0x40000
	flagSendSender = 0x80000 // since OTP 21, enables replacement of SEND (distProtoSEND) by distProtoSEND_SENDER
flagBigSeqTraceLabels = 0x100000
	flagExitPayload = 0x400000 // since OTP 22, enables replacement for EXIT, EXIT2, MONITOR_P_EXIT
flagFragments = 0x800000
flagHandshake23 = 0x1000000 // new connection setup handshake (version 6) introduced in OTP 23
flagUnlinkID = 0x2000000
// for 64bit flags
flagSpawn = 1 << 32
flagNameMe = 1 << 33
flagV4NC = 1 << 34
flagAlias = 1 << 35
)
type nodeFlagId uint64
type nodeFlags nodeFlagId
func (nf nodeFlags) toUint32() uint32 {
return uint32(nf)
}
func (nf nodeFlags) toUint64() uint64 {
return uint64(nf)
}
func (nf nodeFlags) isSet(f nodeFlagId) bool {
return (uint64(nf) & uint64(f)) != 0
}
func toNodeFlags(f ...nodeFlagId) nodeFlags {
var flags uint64
for _, v := range f {
flags |= uint64(v)
}
return nodeFlags(flags)
}
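// Illustrative example (editorial note): toNodeFlags(flagPublished, flagAtomCache)
// yields 0x1|0x2 == 0x3, and nodeFlags(0x3).isSet(flagAtomCache) returns true.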
// DistHandshake implements Erlang handshake
type DistHandshake struct {
node.Handshake
nodename string
creation uint32
challenge uint32
timeout time.Duration
options DistHandshakeOptions
}
type DistHandshakeOptions struct {
Version node.HandshakeVersion // 5 or 6
Cookie string
}
func
|
(timeout time.Duration, options DistHandshakeOptions) node.HandshakeInterface {
// must be 5 or 6
if options.Version != DistHandshakeVersion5 && options.Version != DistHandshakeVersion6 {
options.Version = DefaultDistHandshakeVersion
}
return &DistHandshake{
options: options,
challenge: rand.Uint32(),
}
}
// Init implements the Handshake interface method
func (dh *DistHandshake) Init(nodename string, creation uint32) error {
dh.nodename = nodename
dh.creation = creation
return nil
}
func (dh *DistHandshake) Version() node.HandshakeVersion {
return dh.options.Version
}
func (dh *DistHandshake) Start(conn io.ReadWriter, tls bool) (node.ProtoOptions, error) {
var peer_challenge uint32
var peer_name string
var peer_flags nodeFlags
var protoOptions node.ProtoOptions
flags := toNodeFlags(
flagPublished,
flagUnicodeIO,
flagDistMonitor,
flagDistMonitorName,
flagExtendedPidsPorts,
flagExtendedReferences,
flagAtomCache,
flagDistHdrAtomCache,
flagHiddenAtomCache,
flagNewFunTags,
flagSmallAtomTags,
flagUTF8Atoms,
flagMapTag,
flagFragments,
flagHandshake23,
flagBigCreation,
flagSpawn,
flagV4NC,
flagAlias,
)
b := lib.TakeBuffer()
defer lib.ReleaseBuffer(b)
var await []byte
if dh.options.Version == DistHandshakeVersion5 {
dh.composeName(b, tls, flags)
// the next message must be send_status 's' or send_challenge 'n' (for
// handshake version 5) or 'N' (for handshake version 6)
await = []byte{'s', 'n', 'N'}
} else {
dh.composeNameVersion6(b, tls, flags)
await = []byte{'s', 'N'}
}
if e := b.WriteDataTo(conn); e != nil {
return protoOptions, e
}
// define timeout for the handshaking
timer := time.NewTimer(dh.timeout)
defer timer.Stop()
asyncReadChannel := make(chan error, 2)
asyncRead := func() {
_, e := b.ReadDataFrom(conn, 512)
asyncReadChannel <- e
}
// http://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-handshake
// Every message in the handshake starts with a 16-bit big-endian integer,
// which contains the message length (not counting the two initial bytes).
// In Erlang this corresponds to option {packet, 2} in gen_tcp(3). Notice
// that after the handshake, the distribution switches to 4 byte packet headers.
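	// Illustrative example (editorial note): over plain TCP the status message
	// "sok" is framed as 0x00 0x03 's' 'o' 'k' -- a 2-byte big-endian length (3)
	// followed by the payload; over TLS the same length is written as 4 bytes.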
expectingBytes := 2
if tls {
// TLS connection has 4 bytes packet length header
expectingBytes = 4
}
for {
go asyncRead()
select {
case <-timer.C:
return protoOptions, fmt.Errorf("handshake timeout")
case e := <-asyncReadChannel:
if e != nil {
return protoOptions, e
}
next:
l := binary.BigEndian.Uint16(b.B[expectingBytes-2 : expectingBytes])
buffer := b.B[expectingBytes:]
if len(buffer) < int(l) {
return protoOptions, fmt.Errorf("malformed handshake (wrong packet length)")
}
			// check that we got the correct message type according to the 'await' value
if bytes.Count(await, buffer[0:1]) == 0 {
return protoOptions, fmt.Errorf("malformed handshake (wrong response)")
}
switch buffer[0] {
case 'n':
// 'n' + 2 (version) + 4 (flags) + 4 (challenge) + name...
if len(b.B) < 12 {
return protoOptions, fmt.Errorf("malformed handshake ('n')")
}
peer_challenge, peer_name, peer_flags = dh.readChallenge(b.B[1:])
if peer_challenge == 0 {
return protoOptions, fmt.Errorf("malformed handshake (mismatch handshake version")
}
b.Reset()
dh.composeChallengeReply(b, peer_challenge, tls)
if e := b.WriteDataTo(conn); e != nil {
return protoOptions, e
}
				// add 's' status for the case where we get it after an 'n' or 'N' message
				// (yes, sometimes that happens)
await = []byte{'s', 'a'}
case 'N':
// Peer support version 6.
// The new challenge message format (version 6)
// 8 (flags) + 4 (Creation) + 2 (NameLen) + Name
if len(buffer) < 16 {
return protoOptions, fmt.Errorf("malformed handshake ('N' length)")
}
peer_challenge, peer_name, peer_flags = dh.readChallengeVersion6(buffer[1:])
b.Reset()
if dh.options.Version == DistHandshakeVersion5 {
// upgrade handshake to version 6 by sending complement message
dh.composeComplement(b, flags, tls)
if e := b.WriteDataTo(conn); e != nil {
return protoOptions, e
}
}
dh.composeChallengeReply(b, peer_challenge, tls)
if e := b.WriteDataTo(conn); e != nil {
return protoOptions, e
}
				// add 's' (send_status message) for the case where we get it after an 'n' or 'N' message
await = []byte{'s', 'a'}
case 'a':
// 'a' + 16 (digest)
if len(buffer) != 17 {
return protoOptions, fmt.Errorf("malformed handshake ('a' length of digest)")
}
// 'a' + 16 (digest)
digest := genDigest(dh.challenge, dh.options.Cookie)
				if !bytes.Equal(buffer[1:17], digest) {
return protoOptions, fmt.Errorf("malformed handshake ('a' digest)")
}
// handshaked
//FIXME
protoOptions = node.DefaultProtoOptions(0, false)
return protoOptions, nil
case 's':
				if !dh.readStatus(buffer[1:]) {
return protoOptions, fmt.Errorf("handshake negotiation failed")
}
await = []byte{'n', 'N'}
// "sok"
if len(buffer) > 4 {
b.B = b.B[expectingBytes+3:]
goto next
}
b.Reset()
default:
return protoOptions, fmt.Errorf("malformed handshake ('%c' digest)", buffer[0])
}
}
}
}
func (dh *DistHandshake) Accept(conn io.ReadWriter, tls bool) (string, node.ProtoOptions, error) {
var peer_challenge uint32
var peer_name string
var peer_flags nodeFlags
var protoOptions node.ProtoOptions
var err error
flags := toNodeFlags(
flagPublished,
flagUnicodeIO,
flagDistMonitor,
flagDistMonitorName,
flagExtendedPidsPorts,
flagExtendedReferences,
flagAtomCache,
flagDistHdrAtomCache,
flagHiddenAtomCache,
flagNewFunTags,
flagSmallAtomTags,
flagUTF8Atoms,
flagMapTag,
flagFragments,
flagHandshake23,
flagBigCreation,
flagSpawn,
flagV4NC,
flagAlias,
)
b := lib.TakeBuffer()
defer lib.ReleaseBuffer(b)
var await []byte
// define timeout for the handshaking
timer := time.NewTimer(dh.timeout)
defer timer.Stop()
asyncReadChannel := make(chan error, 2)
asyncRead := func() {
_, e := b.ReadDataFrom(conn, 512)
asyncReadChannel <- e
}
// http://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-handshake
// Every message in the handshake starts with a 16-bit big-endian integer,
// which contains the message length (not counting the two initial bytes).
// In Erlang this corresponds to option {packet, 2} in gen_tcp(3). Notice
// that after the handshake, the distribution switches to 4 byte packet headers.
expectingBytes := 2
if tls {
// TLS connection has 4 bytes packet length header
expectingBytes = 4
}
	// the incoming message must be 'send_name' ('n' or 'N'); as the accepting
	// side we perform the 'receive_name' step of the handshake
await = []byte{'n', 'N'}
for {
go asyncRead()
select {
case <-timer.C:
return peer_name, protoOptions, fmt.Errorf("handshake accept timeout")
case e := <-asyncReadChannel:
if e != nil {
return peer_name, protoOptions, e
}
if b.Len() < expectingBytes+1 {
return peer_name, protoOptions, fmt.Errorf("malformed handshake (too short packet)")
}
next:
l := binary.BigEndian.Uint16(b.B[expectingBytes-2 : expectingBytes])
buffer := b.B[expectingBytes:]
if len(buffer) < int(l) {
return peer_name, protoOptions, fmt.Errorf("malformed handshake (wrong packet length)")
}
if bytes.Count(await, buffer[0:1]) == 0 {
return peer_name, protoOptions, fmt.Errorf("malformed handshake (wrong response %d)", buffer[0])
}
switch buffer[0] {
case 'n':
if len(buffer) < 8 {
return peer_name, protoOptions, fmt.Errorf("malformed handshake ('n' length)")
}
peer_name, peer_flags, err = dh.readName(buffer[1:])
if err != nil {
return peer_name, protoOptions, err
}
b.Reset()
dh.composeStatus(b, tls)
if e := b.WriteDataTo(conn); e != nil {
return peer_name, protoOptions, fmt.Errorf("malformed handshake ('n' accept name)")
}
b.Reset()
if peer_flags.isSet(flagHandshake23) {
dh.composeChallengeVersion6(b, tls, flags)
await = []byte{'s', 'r', 'c'}
} else {
dh.composeChallenge(b, tls, flags)
await = []byte{'s', 'r'}
}
if e := b.WriteDataTo(conn); e != nil {
return peer_name, protoOptions, e
}
case 'N':
// The new challenge message format (version 6)
// 8 (flags) + 4 (Creation) + 2 (NameLen) + Name
if len(buffer) < 16 {
return peer_name, protoOptions, fmt.Errorf("malformed handshake ('N' length)")
}
peer_name, peer_flags, err = dh.readNameVersion6(buffer[1:])
if err != nil {
return peer_name, protoOptions, err
}
b.Reset()
dh.composeStatus(b, tls)
if e := b.WriteDataTo(conn); e != nil {
return peer_name, protoOptions, fmt.Errorf("malformed handshake ('N' accept name)")
}
b.Reset()
dh.composeChallengeVersion6(b, tls, flags)
if e := b.WriteDataTo(conn); e != nil {
return peer_name, protoOptions, e
}
await = []byte{'s', 'r'}
case 'c':
if len(buffer) < 9 {
return peer_name, protoOptions, fmt.Errorf("malformed handshake ('c' length)")
}
peer_flags = dh.readComplement(buffer[1:], peer_flags)
await = []byte{'r'}
if len(buffer) > 9 {
b.B = b.B[expectingBytes+9:]
goto next
}
b.Reset()
case 'r':
if len(buffer) < 19 {
return peer_name, protoOptions, fmt.Errorf("malformed handshake ('r' length)")
}
peer_challenge, valid := dh.validateChallengeReply(buffer[1:])
				if !valid {
return peer_name, protoOptions, fmt.Errorf("malformed handshake ('r' invalid reply)")
}
b.Reset()
dh.composeChallengeAck(b, peer_challenge, tls)
if e := b.WriteDataTo(conn); e != nil {
return peer_name, protoOptions, e
}
// handshaked
// FIXME
protoOptions = node.DefaultProtoOptions(0, false)
return peer_name, protoOptions, nil
case 's':
				if !dh.readStatus(buffer[1:]) {
return peer_name, protoOptions, fmt.Errorf("link status != ok")
}
await = []byte{'c', 'r'}
if len(buffer) > 4 {
b.B = b.B[expectingBytes+3:]
goto next
}
b.Reset()
default:
return peer_name, protoOptions, fmt.Errorf("malformed handshake (unknown code %d)", b.B[0])
}
}
}
}
// private functions
func (dh *DistHandshake) composeName(b *lib.Buffer, tls bool, flags nodeFlags) {
version := uint16(dh.options.Version)
if tls {
b.Allocate(11)
dataLength := 7 + len(dh.nodename) // byte + uint16 + uint32 + len(dh.nodename)
binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
b.B[4] = 'n'
binary.BigEndian.PutUint16(b.B[5:7], version) // uint16
binary.BigEndian.PutUint32(b.B[7:11], flags.toUint32()) // uint32
b.Append([]byte(dh.nodename))
return
}
b.Allocate(9)
dataLength := 7 + len(dh.nodename) // byte + uint16 + uint32 + len(dh.nodename)
binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
b.B[2] = 'n'
binary.BigEndian.PutUint16(b.B[3:5], version) // uint16
binary.BigEndian.PutUint32(b.B[5:9], flags.toUint32()) // uint32
b.Append([]byte(dh.nodename))
}
func (dh *DistHandshake) composeNameVersion6(b *lib.Buffer, tls bool, flags nodeFlags) {
creation := uint32(dh.creation)
if tls {
b.Allocate(19)
dataLength := 15 + len(dh.nodename) // 1 + 8 (flags) + 4 (creation) + 2 (len dh.nodename)
binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
b.B[4] = 'N'
binary.BigEndian.PutUint64(b.B[5:13], flags.toUint64()) // uint64
binary.BigEndian.PutUint32(b.B[13:17], creation) //uint32
binary.BigEndian.PutUint16(b.B[17:19], uint16(len(dh.nodename))) // uint16
b.Append([]byte(dh.nodename))
return
}
b.Allocate(17)
dataLength := 15 + len(dh.nodename) // 1 + 8 (flags) + 4 (creation) + 2 (len dh.nodename)
binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
b.B[2] = 'N'
binary.BigEndian.PutUint64(b.B[3:11], flags.toUint64()) // uint64
binary.BigEndian.PutUint32(b.B[11:15], creation) // uint32
binary.BigEndian.PutUint16(b.B[15:17], uint16(len(dh.nodename))) // uint16
b.Append([]byte(dh.nodename))
}
func (dh *DistHandshake) readName(b []byte) (string, nodeFlags, error) {
if len(b[6:]) > 250 {
return "", 0, fmt.Errorf("Malformed node name")
}
nodename := string(b[6:])
flags := nodeFlags(binary.BigEndian.Uint32(b[2:6]))
	// we don't care about the version value; it's always 5 according to the spec
	// version := binary.BigEndian.Uint16(b[0:2])
return nodename, flags, nil
}
func (dh *DistHandshake) readNameVersion6(b []byte) (string, nodeFlags, error) {
nameLen := int(binary.BigEndian.Uint16(b[12:14]))
if nameLen > 250 {
return "", 0, fmt.Errorf("Malformed node name")
}
nodename := string(b[14 : 14+nameLen])
flags := nodeFlags(binary.BigEndian.Uint64(b[0:8]))
	// we don't care about the peer creation value
// creation:= binary.BigEndian.Uint32(b[8:12]),
return nodename, flags, nil
}
func (dh *DistHandshake) composeStatus(b *lib.Buffer, tls bool) {
	// there are a few options for the status: ok, ok_simultaneous, nok, not_allowed, alive.
	// More details here: https://erlang.org/doc/apps/erts/erl_dist_protocol.html#the-handshake-in-detail
	// we support "ok" only; in any other case the link is simply closed
if tls {
b.Allocate(4)
dataLength := 3 // 's' + "ok"
binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
b.Append([]byte("sok"))
return
}
b.Allocate(2)
dataLength := 3 // 's' + "ok"
binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
b.Append([]byte("sok"))
}
func (dh *DistHandshake) readStatus(msg []byte) bool {
	return string(msg[:2]) == "ok"
}
func (dh *DistHandshake) composeChallenge(b *lib.Buffer, tls bool, flags nodeFlags) {
if tls {
b.Allocate(15)
dataLength := uint32(11 + len(dh.nodename))
binary.BigEndian.PutUint32(b.B[0:4], dataLength)
b.B[4] = 'n'
binary.BigEndian.PutUint16(b.B[5:7], uint16(dh.options.Version)) // uint16
binary.BigEndian.PutUint32(b.B[7:11], flags.toUint32()) // uint32
binary.BigEndian.PutUint32(b.B[11:15], dh.challenge) // uint32
b.Append([]byte(dh.nodename))
return
}
b.Allocate(13)
dataLength := 11 + len(dh.nodename)
binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
b.B[2] = 'n'
binary.BigEndian.PutUint16(b.B[3:5], uint16(dh.options.Version)) // uint16
binary.BigEndian.PutUint32(b.B[5:9], flags.toUint32()) // uint32
binary.BigEndian.PutUint32(b.B[9:13], dh.challenge) // uint32
b.Append([]byte(dh.nodename))
}
func (dh *DistHandshake) composeChallengeVersion6(b *lib.Buffer, tls bool, flags nodeFlags) {
if tls {
		// 1 ('N') + 8 (flags) + 4 (challenge) + 4 (creation) + 2 (len(dh.nodename))
b.Allocate(23)
dataLength := 19 + len(dh.nodename)
binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
b.B[4] = 'N'
binary.BigEndian.PutUint64(b.B[5:13], uint64(flags)) // uint64
binary.BigEndian.PutUint32(b.B[13:17], dh.challenge) // uint32
binary.BigEndian.PutUint32(b.B[17:21], dh.creation) // uint32
binary.BigEndian.PutUint16(b.B[21:23], uint16(len(dh.nodename))) // uint16
b.Append([]byte(dh.nodename))
return
}
	// 1 ('N') + 8 (flags) + 4 (challenge) + 4 (creation) + 2 (len(dh.nodename))
b.Allocate(21)
dataLength := 19 + len(dh.nodename)
binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
b.B[2] = 'N'
binary.BigEndian.PutUint64(b.B[3:11], uint64(flags)) // uint64
binary.BigEndian.PutUint32(b.B[11:15], dh.challenge) // uint32
binary.BigEndian.PutUint32(b.B[15:19], dh.creation) // uint32
binary.BigEndian.PutUint16(b.B[19:21], uint16(len(dh.nodename))) // uint16
b.Append([]byte(dh.nodename))
}
// returns challenge, nodename, nodeFlags
func (dh *DistHandshake) readChallenge(msg []byte) (challenge uint32, nodename string, flags nodeFlags) {
version := binary.BigEndian.Uint16(msg[0:2])
if version != uint16(DistHandshakeVersion5) {
return
}
challenge = binary.BigEndian.Uint32(msg[6:10])
nodename = string(msg[10:])
flags = nodeFlags(binary.BigEndian.Uint32(msg[2:6]))
return
}
func (dh *DistHandshake) readChallengeVersion6(msg []byte) (challenge uint32, nodename string, flags nodeFlags) {
lenName := int(binary.BigEndian.Uint16(msg[16:18]))
challenge = binary.BigEndian.Uint32(msg[8:12])
nodename = string(msg[18 : 18+lenName])
flags = nodeFlags(binary.BigEndian.Uint64(msg[0:8]))
// don't care about 'creation'
// creation := binary.BigEndian.Uint32(msg[12:16]),
return
}
func (dh *DistHandshake) readComplement(msg []byte, peer_flags nodeFlags) nodeFlags {
flags := uint64(binary.BigEndian.Uint32(msg[0:4])) << 32
peer_flags = nodeFlags(peer_flags.toUint64() | flags)
// creation = binary.BigEndian.Uint32(msg[4:8])
return peer_flags
}
func (dh *DistHandshake) validateChallengeReply(b []byte) (uint32, bool) {
challenge := binary.BigEndian.Uint32(b[:4])
digestB := b[4:]
digestA := genDigest(dh.challenge, dh.options.Cookie)
return challenge, bytes.Equal(digestA[:], digestB)
}
func (dh *DistHandshake) composeChallengeAck(b *lib.Buffer, peer_challenge uint32, tls bool) {
if tls {
b.Allocate(5)
dataLength := uint32(17) // 'a' + 16 (digest)
binary.BigEndian.PutUint32(b.B[0:4], dataLength)
b.B[4] = 'a'
digest := genDigest(peer_challenge, dh.options.Cookie)
b.Append(digest)
return
}
b.Allocate(3)
dataLength := uint16(17) // 'a' + 16 (digest)
binary.BigEndian.PutUint16(b.B[0:2], dataLength)
b.B[2] = 'a'
digest := genDigest(peer_challenge, dh.options.Cookie)
b.Append(digest)
}
func (dh *DistHandshake) composeChallengeReply(b *lib.Buffer, challenge uint32, tls bool) {
if tls {
digest := genDigest(challenge, dh.options.Cookie)
b.Allocate(9)
dataLength := 5 + len(digest) // 1 (byte) + 4 (challenge) + 16 (digest)
binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
b.B[4] = 'r'
binary.BigEndian.PutUint32(b.B[5:9], dh.challenge) // uint32
b.Append(digest)
return
}
b.Allocate(7)
digest := genDigest(challenge, dh.options.Cookie)
dataLength := 5 + len(digest) // 1 (byte) + 4 (challenge) + 16 (digest)
binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
b.B[2] = 'r'
binary.BigEndian.PutUint32(b.B[3:7], dh.challenge) // uint32
b.Append(digest)
}
func (dh *DistHandshake) composeComplement(b *lib.Buffer, flags nodeFlags, tls bool) {
	// the creation value must be cast to int32 in order to follow Erlang's
	// handshake; Ergo doesn't care about that distinction.
node_flags := uint32(flags.toUint64() >> 32)
if tls {
b.Allocate(13)
dataLength := 9 // 1 + 4 (flag high) + 4 (creation)
binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
b.B[4] = 'c'
binary.BigEndian.PutUint32(b.B[5:9], node_flags)
binary.BigEndian.PutUint32(b.B[9:13], dh.creation)
return
}
dataLength := 9 // 1 + 4 (flag high) + 4 (creation)
b.Allocate(11)
binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
b.B[2] = 'c'
binary.BigEndian.PutUint32(b.B[3:7], node_flags)
binary.BigEndian.PutUint32(b.B[7:11], dh.creation)
}
func genDigest(challenge uint32, cookie string) []byte {
s := fmt.Sprintf("%s%d", cookie, challenge)
digest := md5.Sum([]byte(s))
return digest[:]
}
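// Illustrative example (editorial note): genDigest(42, "cookie") computes
// md5("cookie42") -- the cookie string concatenated with the challenge
// rendered in decimal, exactly as both sides of the handshake expect.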
|
CreateDistHandshake
|
exceptions.py
|
#
"""
Texar defined exceptions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
__all__ = [
"TexarError"
]
class
|
(Exception):
"""
Texar error.
"""
pass
|
TexarError
|
index.ts
|
import {Component} from '@angular/core';
import {FormControl, FormGroup} from '@angular/forms';
import {TuiTime} from '@taiga-ui/cdk';
import {changeDetection} from '../../../../../change-detection-strategy';
import {encapsulation} from '../../../../../view-encapsulation';
@Component({
selector: 'tui-input-time-example-1',
templateUrl: './index.html',
changeDetection,
encapsulation,
})
export class
|
{
readonly testForm = new FormGroup({
testValue: new FormControl(new TuiTime(12, 30)),
});
}
|
TuiInputTimeExample1
|
binaries_test.go
|
package binaries
import (
"fmt"
"io/ioutil"
"log"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func tmpDir(t *testing.T) string {
dir, err := ioutil.TempDir("/tmp", "prisma-client-go-test-fetchEngine-")
if err != nil {
t.Fatal(err)
}
return dir
}
func TestFetch(t *testing.T) {
dir := tmpDir(t)
//goland:noinspection GoUnhandledErrorResult
defer os.RemoveAll(dir)
if err := FetchNative(dir); err != nil {
t.Fatalf("fetchEngine failed: %s", err)
}
}
func TestFetch_localDir(t *testing.T) {
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if err := FetchNative(wd); err != nil {
t.Fatalf("fetchEngine failed: %s", err)
}
}
func TestFetch_withCache(t *testing.T) {
dir := tmpDir(t)
//goland:noinspection GoUnhandledErrorResult
defer os.RemoveAll(dir)
start := time.Now()
if err := FetchNative(dir); err != nil {
t.Fatalf("fetchEngine 1 failed: %s", err)
}
log.Printf("first fetchEngine took %s", time.Since(start))
start = time.Now()
if err := FetchNative(dir); err != nil {
t.Fatalf("fetchEngine 2 failed: %s", err)
}
log.Printf("second fetchEngine took %s", time.Since(start))
|
}
func TestFetch_relativeDir(t *testing.T) {
actual := FetchNative(".")
expected := fmt.Errorf("toDir must be absolute")
assert.Equal(t, expected, actual)
}
|
if time.Since(start) > 10*time.Millisecond {
t.Fatalf("second fetchEngine took more than 10ms")
}
|
server.py
|
from flask import Flask, send_from_directory
import os
import connexion
from connexion.resolver import RestyResolver
from flask_socketio import SocketIO, send, emit
# Constants
STATIC_FILES_DIR = 'static'
# The application server
app = connexion.FlaskApp(__name__, specification_dir='swagger/', resolver=RestyResolver('VrDemoServer.api'))
flask_app = app.app
# We define the websocket feature
socketio = SocketIO(flask_app)
@socketio.on('message')
def handle_message(message):
|
# We add the OpenApi definitions
app.add_api('demo2.yaml')
# we define that all static content will be served from the STATIC_FILES_DIR subdirectory
static_files_dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), STATIC_FILES_DIR)
app.static_folder = STATIC_FILES_DIR
app.add_url_rule(
app.app.static_url_path + "/<path:filename>",
endpoint="static",
view_func=app.app.send_static_file,
)
# we redirect the root index.html to app_index.html
@app.route('/', methods=['GET'])
def serve_index():
return send_from_directory(static_files_dir_path, 'app_index.html')
# function to launch the server
def run_server(ip, port=8080, debug=False):
# print('starting rest api')
# app.run(host=ip, port=port, debug=debug)
print('starting websocket')
socketio.run(flask_app, host=ip, port=port, debug=debug)
|
print('received message: ' + message)
emit('my response', "res"+message)
|
timestamp.ts
|
import {Operator} from '../Operator';
import {Observable} from '../Observable';
import {Subscriber} from '../Subscriber';
import {Scheduler} from '../Scheduler';
import {async} from '../scheduler/async';
/**
* @param scheduler
* @return {Observable<Timestamp<any>>|WebSocketSubject<T>|Observable<T>}
* @method timestamp
* @owner Observable
*/
export function timestamp<T>(scheduler: Scheduler = async): Observable<Timestamp<T>> {
return this.lift(new TimestampOperator(scheduler));
}
export interface TimestampSignature<T> {
(scheduler?: Scheduler): Observable<Timestamp<T>>;
}
export class Timestamp<T> {
constructor(public value: T, public timestamp: number) {
}
};
class
|
<T> implements Operator<T, Timestamp<T>> {
constructor(private scheduler: Scheduler) {
}
call(observer: Subscriber<Timestamp<T>>): Subscriber<T> {
return new TimestampSubscriber(observer, this.scheduler);
}
}
class TimestampSubscriber<T> extends Subscriber<T> {
constructor(destination: Subscriber<Timestamp<T>>, private scheduler: Scheduler) {
super(destination);
}
protected _next(value: T): void {
const now = this.scheduler.now();
this.destination.next(new Timestamp(value, now));
}
}
|
TimestampOperator
|
validation.go
|
package operator
import (
"fmt"
"net"
"os"
"k8s.io/apimachinery/pkg/util/errors"
)
// IsValidConfig checks the validity of all configuration options.
func IsValidConfig(cfg *Config) error {
errs := []error{}
errs = append(errs, IsValidNamespaceConfig(cfg))
errs = append(errs, IsValidListenConfig(cfg))
errs = append(errs, IsValidPrestoConfig(cfg))
errs = append(errs, IsValidHiveConfig(cfg))
errs = append(errs, IsValidKubeConfig(cfg.Kubeconfig))
errs = append(errs, IsValidPrometheusConfig(cfg))
if err := isValidTLSConfig(&cfg.APITLSConfig); err != nil {
errs = append(errs, fmt.Errorf("error validating apiTLSConfig: %s", err.Error()))
}
if err := isValidTLSConfig(&cfg.MetricsTLSConfig); err != nil {
errs = append(errs, fmt.Errorf("error validating metricsTLSConfig: %s", err.Error()))
}
if len(errs) != 0 {
return errors.NewAggregate(errs)
}
return nil
}
// IsValidNamespaceConfig ensures the allNamespaces field is set when more than one target namespace is configured.
func IsValidNamespaceConfig(cfg *Config) error {
if len(cfg.TargetNamespaces) > 1 && !cfg.AllNamespaces {
return fmt.Errorf("must set allNamespaces if more than one namespace is passed to targetNamespaces")
}
return nil
}
// IsValidListenConfig ensures all *Listen fields are set to valid host/ports if they have a value set.
func IsValidListenConfig(cfg *Config) error {
errs := []error{}
errs = append(errs, isValidHostPort(cfg.APIListen, "apiListen"))
if len(errs) > 0 {
return errors.NewAggregate(errs)
}
return nil
}
// IsValidPrestoConfig ensures all Presto* fields are valid if provided.
func IsValidPrestoConfig(cfg *Config) error {
errs := []error{}
if !cfg.PrestoUseTLS {
if cfg.PrestoUseClientCertAuth {
errs = append(errs, fmt.Errorf("prestoUseClientCertAuth cannot be set to true if prestoUseTLS is false"))
}
if len(cfg.PrestoCAFile) > 0 {
errs = append(errs, fmt.Errorf("prestoCAFile cannot be set if prestoUseTLS is false"))
}
}
if len(cfg.PrestoCAFile) > 0 {
if _, err := os.Stat(cfg.PrestoCAFile); err != nil {
errs = append(errs, err)
}
}
if (len(cfg.PrestoClientCertFile) > 0 && len(cfg.PrestoClientKeyFile) == 0) ||
(len(cfg.PrestoClientKeyFile) > 0 && len(cfg.PrestoClientCertFile) == 0) {
errs = append(errs, fmt.Errorf("prestoClientCertFile and prestoClientKeyFile must both be specified or neither specified"))
}
if len(errs) > 0 {
return errors.NewAggregate(errs)
}
return nil
}
// IsValidHiveConfig ensures all Hive* fields are valid if provided.
func IsValidHiveConfig(cfg *Config) error {
errs := []error{}
if !cfg.HiveUseTLS {
if cfg.HiveUseClientCertAuth {
errs = append(errs, fmt.Errorf("hiveUseClientCertAuth cannot be set to true if hiveUseTLS is false"))
}
if len(cfg.HiveCAFile) > 0 {
errs = append(errs, fmt.Errorf("hiveCAFile cannot be set if hiveUseTLS is false"))
}
}
if len(cfg.HiveCAFile) > 0 {
if _, err := os.Stat(cfg.HiveCAFile); err != nil {
errs = append(errs, err)
}
}
if (len(cfg.HiveClientCertFile) > 0 && len(cfg.HiveClientKeyFile) == 0) ||
(len(cfg.HiveClientKeyFile) > 0 && len(cfg.HiveClientCertFile) == 0) {
errs = append(errs, fmt.Errorf("hiveClientCertFile and hiveClientKeyFile must both be specified or neither specified"))
}
if len(errs) > 0 {
return errors.NewAggregate(errs)
}
return nil
}
// IsValidKubeConfig ensures the kube config is set to a valid file if provided.
func IsValidKubeConfig(kubeconfig string) error {
if len(kubeconfig) > 0 {
if _, err := os.Stat(kubeconfig); err != nil {
return err
}
}
return nil
}
// IsValidPrometheusConfig ensures prometheus configuration is valid.
func IsValidPrometheusConfig(cfg *Config) error {
errs := []error{}
if cfg.PrometheusConfig.CAFile != "" {
if _, err := os.Stat(cfg.PrometheusConfig.CAFile); err != nil {
errs = append(errs, err)
}
}
// PrometheusDataSourceMaxBackfillImportDuration overrides PrometheusDataSourceGlobalImportFromTime
// don't set both.
if cfg.PrometheusDataSourceGlobalImportFromTime != nil && cfg.PrometheusDataSourceMaxBackfillImportDuration > 0 {
errs = append(errs, fmt.Errorf("prometheusDataSourceGlobalImportFromTime and prometheusDataSourceMaxBackfillImportDuration cannot both be set"))
}
if len(errs) > 0 {
return errors.NewAggregate(errs)
}
return nil
}
// isValidTLSConfig ensures the TLS config is valid.
func
|
(cfg *TLSConfig) error {
if cfg.UseTLS {
if cfg.TLSCert == "" {
return fmt.Errorf("must set TLS certificate if TLS is enabled")
}
if cfg.TLSKey == "" {
return fmt.Errorf("must set TLS private key if TLS is enabled")
}
}
return nil
}
// isValidHostPort attempts to split a non-empty hp into host and port, returning any errors found.
// TODO this only validates non-empty strings. We may want to check for empty strings and report errors.
// TODO this requires a port to be specified; is that one of our requirements?
func isValidHostPort(hp string, name string) error {
if len(hp) > 0 {
if _, _, err := net.SplitHostPort(hp); err != nil {
return fmt.Errorf("invalid %s: %s", name, err.Error())
}
}
return nil
}
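// Illustrative example (editorial note): isValidHostPort("127.0.0.1:8080", "apiListen")
// returns nil, while isValidHostPort("127.0.0.1", "apiListen") returns an error,
// because net.SplitHostPort requires an explicit port.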
|
isValidTLSConfig
|
ToolStripContentPanelRenderEventHandler.py
|
class ToolStripContentPanelRenderEventHandler(MulticastDelegate,ICloneable,ISerializable):
"""
Represents the method that will handle the System.Windows.Forms.ToolStripContentPanel.RendererChanged event of a System.Windows.Forms.ToolStripContentPanel.
ToolStripContentPanelRenderEventHandler(object: object,method: IntPtr)
"""
def BeginInvoke(self,sender,e,callback,object):
""" BeginInvoke(self: ToolStripContentPanelRenderEventHandler,sender: object,e: ToolStripContentPanelRenderEventArgs,callback: AsyncCallback,object: object) -> IAsyncResult """
pass
def CombineImpl(self,*args):
"""
CombineImpl(self: MulticastDelegate,follow: Delegate) -> Delegate
Combines this System.Delegate with the specified System.Delegate to form a new delegate.
follow: The delegate to combine with this delegate.
Returns: A delegate that is the new root of the System.MulticastDelegate invocation list.
"""
pass
def DynamicInvokeImpl(self,*args):
"""
DynamicInvokeImpl(self: Delegate,args: Array[object]) -> object
Dynamically invokes (late-bound) the method represented by the current delegate.
args: An array of objects that are the arguments to pass to the method represented by the current
delegate.-or- null,if the method represented by the current delegate does not require
arguments.
Returns: The object returned by the method represented by the delegate.
"""
pass
def EndInvoke(self,result):
""" EndInvoke(self: ToolStripContentPanelRenderEventHandler,result: IAsyncResult) """
pass
def GetMethodImpl(self,*args):
"""
GetMethodImpl(self: MulticastDelegate) -> MethodInfo
|
Returns a static method represented by the current System.MulticastDelegate.
Returns: A static method represented by the current System.MulticastDelegate.
"""
pass
def Invoke(self,sender,e):
""" Invoke(self: ToolStripContentPanelRenderEventHandler,sender: object,e: ToolStripContentPanelRenderEventArgs) """
pass
def RemoveImpl(self,*args):
"""
RemoveImpl(self: MulticastDelegate,value: Delegate) -> Delegate
Removes an element from the invocation list of this System.MulticastDelegate that is equal to
the specified delegate.
value: The delegate to search for in the invocation list.
Returns: If value is found in the invocation list for this instance,then a new System.Delegate without
value in its invocation list; otherwise,this instance with its original invocation list.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,object,method):
""" __new__(cls: type,object: object,method: IntPtr) """
pass
def __reduce_ex__(self,*args):
pass
| |
traits.rs
|
// Copyright 2017 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use bytes::Bytes;
use futures::future::Future;
use std::{io::Error as IoError, ops::Not};
use Multiaddr;
/// Type of connection for the upgrade.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Endpoint {
/// The socket comes from a dialer.
Dialer,
/// The socket comes from a listener.
Listener,
}
impl Not for Endpoint {
type Output = Endpoint;
fn
|
(self) -> Self::Output {
match self {
Endpoint::Dialer => Endpoint::Listener,
Endpoint::Listener => Endpoint::Dialer
}
}
}
/// Implemented on structs that describe a possible upgrade to a connection between two peers.
///
/// The generic `C` is the type of the incoming connection before it is upgraded.
///
/// > **Note**: The `upgrade` method of this trait uses `self` and not `&self` or `&mut self`.
/// > This has been designed so that you would implement this trait on `&Foo` or
/// > `&mut Foo` instead of directly on `Foo`.
pub trait ConnectionUpgrade<C> {
/// Iterator returned by `protocol_names`.
type NamesIter: Iterator<Item = (Bytes, Self::UpgradeIdentifier)>;
/// Type that serves as an identifier for the protocol. This type only exists to be returned
/// by the `NamesIter` and then be passed to `upgrade`.
///
/// This is only useful on implementations that dispatch between multiple possible upgrades.
/// Any basic implementation will probably just use the `()` type.
type UpgradeIdentifier;
/// Returns the name of the protocols to advertise to the remote.
fn protocol_names(&self) -> Self::NamesIter;
/// Type of the stream that has been upgraded. Generally wraps around `C` and `Self`.
///
/// > **Note**: For upgrades that add an intermediary layer (such as `secio` or `multiplex`),
/// > this associated type must implement `AsyncRead + AsyncWrite`.
type Output;
/// Type of the future that will resolve to `Self::Output`.
type Future: Future<Item = Self::Output, Error = IoError>;
/// This method is called after protocol negotiation has been performed.
///
/// Because performing the upgrade may not be instantaneous (eg. it may require a handshake),
/// this function returns a future instead of the direct output.
fn upgrade(
self,
socket: C,
id: Self::UpgradeIdentifier,
ty: Endpoint,
remote_addr: &Multiaddr,
) -> Self::Future;
}
|
not
|
cmd.go
|
package cmd
import (
"flag"
"fmt"
"log"
"os"
"github.com/orange-lightsaber/pretty-safe-backup/run"
)
func Exec(version string)
|
{
v := flag.Bool("v", false, "Print version.")
loadProfileDefault := "/path/to/profile"
loadProfile := flag.String("L", loadProfileDefault, "Load a profile.")
flag.Parse()
if *v {
fmt.Printf("psb v%s\n", version)
os.Exit(0)
}
if *loadProfile != loadProfileDefault {
rc, err := run.DecodeRunConfig(*loadProfile)
if err != nil {
log.Fatal(err.Error())
}
newRunConfigFile, err := rc.WriteRunConfig()
if err != nil {
log.Fatal(err.Error())
}
fmt.Printf("Run config created: %s\n", newRunConfigFile)
os.Exit(0)
}
run.Daemonize()
}
|
|
convert_test.go
|
package utils
import (
"sort"
"testing"
tt "github.com/verlandz/go-pkg/tester"
)
func TestParseInt(t *testing.T) {
var tcs = []struct {
name string
input string
expected int
}{
{
name: tt.Name{
Given: "string",
When: "the value is valid",
Then: "return valid int",
}.Construct(),
input: "123",
expected: 123,
},
{
name: tt.Name{
Given: "string",
When: "the value is invalid",
Then: "return empty int",
}.Construct(),
input: "123x",
expected: 0,
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
actual := ParseInt(tc.input)
tt.Equal(t, tc.expected, actual)
})
}
}
func TestParseInt64(t *testing.T) {
var tcs = []struct {
name string
input string
expected int64
}{
{
name: tt.Name{
Given: "string",
When: "the value is valid",
Then: "return valid int64",
}.Construct(),
input: "123",
expected: 123,
},
{
name: tt.Name{
Given: "string",
When: "the value is invalid",
Then: "return empty int64",
}.Construct(),
input: "123x",
expected: 0,
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
actual := ParseInt64(tc.input)
tt.Equal(t, tc.expected, actual)
})
}
}
func TestParseSliceInt(t *testing.T)
|
func TestParseSliceInt64(t *testing.T) {
var tcs = []struct {
name string
input []string
expected []int64
}{
{
name: tt.Name{
Given: "slice of string",
When: "the value is valid",
Then: "return valid slice of int64",
}.Construct(),
input: []string{"1", "2"},
expected: []int64{1, 2},
},
{
name: tt.Name{
Given: "slice of string",
When: "the value is invalid",
Then: "return invalid slice of int64",
}.Construct(),
input: []string{"x", "y"},
expected: []int64{0, 0},
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
actual := ParseSliceInt64(tc.input)
tt.Equal(t, tc.expected, actual)
})
}
}
func TestFormatInt(t *testing.T) {
var tcs = []struct {
name string
input int
expected string
}{
{
name: tt.Name{
Given: "int",
When: "the value is valid",
Then: "return valid string",
}.Construct(),
input: 123,
expected: "123",
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
actual := FormatInt(tc.input)
tt.Equal(t, tc.expected, actual)
})
}
}
func TestFormatInt64(t *testing.T) {
var tcs = []struct {
name string
input int64
expected string
}{
{
name: tt.Name{
Given: "int",
When: "the value is valid",
Then: "return valid string",
}.Construct(),
input: 123,
expected: "123",
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
actual := FormatInt64(tc.input)
tt.Equal(t, tc.expected, actual)
})
}
}
func TestFormatSliceInt(t *testing.T) {
var tcs = []struct {
name string
input []int
expected []string
}{
{
name: tt.Name{
Given: "slice of int",
When: "the value is valid",
Then: "return valid slice of string",
}.Construct(),
input: []int{1, 2, 0},
expected: []string{"1", "2", "0"},
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
actual := FormatSliceInt(tc.input)
tt.Equal(t, tc.expected, actual)
})
}
}
func TestFormatSliceInt64(t *testing.T) {
var tcs = []struct {
name string
input []int64
expected []string
}{
{
name: tt.Name{
Given: "slice of int64",
When: "the value is valid",
Then: "return valid slice of string",
}.Construct(),
input: []int64{1, 2, 0},
expected: []string{"1", "2", "0"},
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
actual := FormatSliceInt64(tc.input)
tt.Equal(t, tc.expected, actual)
})
}
}
func TestConvertMapStringStringToStrings(t *testing.T) {
type in struct {
mp map[string]string
delimiter string
}
var tcs = []struct {
name string
input in
expected []string
}{
{
name: tt.Name{
Given: "map[string]string and delimiter",
When: "map is empty",
Then: "return converted data",
}.Construct(),
input: in{
delimiter: ":",
},
expected: []string{},
},
{
name: tt.Name{
Given: "map[string]string and delimiter",
When: "delimiter is empty",
Then: "return converted data",
}.Construct(),
input: in{
mp: map[string]string{
"hello": "world",
"user": "pass",
},
},
expected: []string{"helloworld", "userpass"},
},
{
name: tt.Name{
Given: "map[string]string and delimiter",
When: "both not empty",
Then: "return converted data",
}.Construct(),
input: in{
mp: map[string]string{
"hello": "world",
"user": "pass",
},
delimiter: ":",
},
expected: []string{"hello:world", "user:pass"},
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
actual := ConvertMapStringStringToStrings(tc.input.mp, tc.input.delimiter)
// Sort to avoid mismatch.
sort.Slice(actual[:], func(i, j int) bool {
return actual[i] > actual[j]
})
sort.Slice(tc.expected[:], func(i, j int) bool {
return tc.expected[i] > tc.expected[j]
})
tt.Equal(t, tc.expected, actual)
})
}
}
|
{
var tcs = []struct {
name string
input []string
expected []int
}{
{
name: tt.Name{
Given: "slice of string",
When: "the value is valid",
Then: "return valid slice of int",
}.Construct(),
input: []string{"1", "2"},
expected: []int{1, 2},
},
{
name: tt.Name{
Given: "slice of string",
When: "the value is invalid",
Then: "return invalid slice of int",
}.Construct(),
input: []string{"x", "y"},
expected: []int{0, 0},
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
actual := ParseSliceInt(tc.input)
tt.Equal(t, tc.expected, actual)
})
}
}
|
golden.rs
|
// The golden image is as follows:
// - the image name is "GOLDEN IMAGE"
// - the first exa is named AB
// - mode global, maximized window
// - sprite is empty
// - script is "COPY 1 X\nNOOP"
// - the second exa is named CD
// - mode local, maximized window
// - custom sprite, activated pixels are the 4 corners
// - script is "NOTE I AM A GOLDEN GOD\nHALT"
mod common;
use common::*;
#[test]
fn
|
() {
let mut bench = TestBench::redshift_vm_from_image("./tests/golden.png".to_string());
let e1 = bench.get_exa("AB");
let e2 = bench.get_exa("CD");
bench.assert_exa_global_mode(&e1);
bench.assert_exa_local_mode(&e2);
bench.assert_exa_sprite(&e2, vec![0, 1, 8, 1, 80, 1, 8, 1]);
bench.run_cycle();
bench.assert_exa_register(&e1, "x", 1);
}
|
test_golden
|
__init__.py
|
#MIT License
#
#Copyright (c) 2020 signag
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""
`logging_plus` - Add-on to Python logging
This module extends the standard Python logging module for the following aspects:
- Subclassing Logger allows customization of logging messages depending on context
- This is used for automatic indentation depending on call stack level
- The module provides also the capability to generically log function entry and exit
"""
import logging
import inspect
import sys
import atexit
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
#This parameter controls whether or not logging inside infrastructure modules is activated.
#The following modules are affected: inspect, logging, logging-plus
#
noInfrastructureLogging = True
class Manager(logging.Manager):
"""
Subclassing logging.Manager supports instantiating the subclassed Logger
instead of the standard Logger.
"""
def __init__(self, rootnode):
"""
Initialize the manager
"""
super().__init__(rootnode)
def getLogger(self, name):
"""
Return the subclassed Logger rather than the standard Logger
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
logging._acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, logging.PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
logging._releaseLock()
return rv
def cleanupLoggers(self):
"""
Remove registered file handlers from all available loggers
"""
lgr = root
for hdl in reversed(lgr.handlers):
if isinstance(hdl, logging.FileHandler):
lgr.removeHandler(hdl)
for lgName in self.loggerDict:
lgr = self.getLogger(lgName)
for hdl in lgr.handlers:
if isinstance(hdl, logging.FileHandler):
lgr.removeHandler(hdl)
class Logger(logging.Logger):
def __init__(self, name, level=logging.NOTSET):
"""
Initialize the subclassed Logger
"""
super().__init__(name, level)
def debug(self, msg, *args, **kwargs):
"""
Indent DEBUG message according to call stack level before logging
"""
indent = len(inspect.stack()) - 1
msg = " " * indent + msg
super().debug(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Indent INFO message according to call stack level before logging
"""
indent = len(inspect.stack()) - 1
msg = " " * indent + msg
super().info(msg, *args, **kwargs)
def
|
(self, msg, *args, **kwargs):
"""
Indent WARNING message according to call stack level before logging
"""
indent = len(inspect.stack()) - 1
msg = " " * indent + msg
super().warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Indent ERROR message according to call stack level before logging
"""
indent = len(inspect.stack()) - 1
msg = " " * indent + msg
super().error(msg, *args, **kwargs)
def logEntry(self, msg, *args, **kwargs):
"""
        Log function entry with DEBUG severity
"""
indent = len(inspect.stack()) - 2
if indent < 0:
indent = 0
msg = " " * indent + ">>> Entry " + msg
super().debug(msg, *args, **kwargs)
def autoLogEntry(self, msg, *args, **kwargs):
"""
        Log function entry with DEBUG severity in case of automatic logging
"""
indent = len(inspect.stack()) - 3
if indent < 0:
indent = 0
msg = " " * indent + ">>> Entry " + msg
super().debug(msg, *args, **kwargs)
def logExit(self, msg, *args, **kwargs):
"""
        Log function exit with DEBUG severity
"""
indent = len(inspect.stack()) - 2
if indent < 0:
indent = 0
msg = " " * indent + "<<< Exit " + msg
super().debug(msg, *args, **kwargs)
def autoLogExit(self, msg, *args, **kwargs):
"""
        Log function exit with DEBUG severity in case of automatic logging
"""
indent = len(inspect.stack()) - 3
if indent < 0:
indent = 0
msg = " " * indent + "<<< Exit " + msg
super().debug(msg, *args, **kwargs)
class RootLogger(Logger):
"""
This is the extended root logger
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
root = RootLogger(logging.WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
def getLogger(name=None):
"""
Return an extended logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if not name or isinstance(name, str) and name == root.name:
return root
return Logger.manager.getLogger(name)
def excludeFromLogging(frame):
"""
Check whether frame shall be excluded from logging.
    This is the case if the module of the frame itself or one of its outer frames
    belongs to the inspect or logging infrastructure.
"""
if not frame:
return False
module = inspect.getmodule(frame)
if not module:
return False
moduleName = module.__name__
if (moduleName == "inspect") \
or (moduleName == "logging") \
or (moduleName == __name__):
#Do not log inside infrastructure modules
return True
else:
oframe = frame.f_back
if not oframe:
return False
return excludeFromLogging(oframe)
def autoLogIgnore(frame, event, arg):
"""
Function to register as trace function for scopes where logging shall be deactivated.
The function is used to log entry to a new scope ('call') or exit from a scope ('return').
"""
if (event == 'call'):
#Only call needs to be sensed
return autoLogIgnore
def autoLogEntryExit(frame, event, arg):
"""
Function to register as trace function for the current scope.
The function is used to log entry to a new scope ('call') or exit from a scope ('return').
"""
if (event == 'call') or (event == 'return'):
#Only call and return events are sensed
if not frame:
return autoLogIgnore
code_obj = frame.f_code
func_name = code_obj.co_name
file_name = code_obj.co_filename
file_line = code_obj.co_firstlineno
module = inspect.getmodule(frame)
if not module:
return autoLogIgnore
moduleName = module.__name__
if event == 'call':
            #The system is entering a new scope.
if noInfrastructureLogging:
if excludeFromLogging(frame):
return autoLogIgnore
getLogger(moduleName).autoLogEntry('%s (%s - line %s - module %s)', func_name, file_name, file_line, moduleName)
            #The function returns a reference to itself, in order to register itself as the trace function for the new scope
return autoLogEntryExit
elif event == 'return':
#System is about to exit a scope (function or other code block). arg is the value being returned.
getLogger(moduleName).autoLogExit ('%s : Return value: %s', func_name, arg)
def removeFileHandlers():
"""This function removes file handlers from available loggers in order to avoid a race condition during shutdown
The Python shutdown sequence is as follows:
1. Stop main thread
2. Close open file handlers
3. Wait for termination of non-daemon threads
4. Execute registered atexit functions
5. Garbage collection
6. Process termination
    If class __del__ functions include logging with file handlers, and if objects are destroyed
during garbage collection (5), file output will lead to an exception
because open file handlers have already been closed (2).
Note: This applies only to explicit logging within the __del__ functions.
Automatic logging of entry and exit has already been switched off at this time
through unregisterAutoLogEntryExit.
"""
mgr = getLogger().manager
mgr.cleanupLoggers()
def registerAutoLogEntryExit():
"""
Register autoLogEntryExit as system trace function
This will issue logging whenever a function scope is entered or exited
"""
sys.settrace(autoLogEntryExit)
def unregisterAutoLogEntryExit():
"""
Clear system trace function
This will stop logging function entry / exit
"""
sys.settrace(None)
#Register unregisterAutoLogEntryExit to avoid logging exceptions during module shutdown
atexit.register(removeFileHandlers)
atexit.register(unregisterAutoLogEntryExit)
|
warning
|
hub.go
|
package quic
import (
"context"
"time"
quic "github.com/lucas-clemente/quic-go"
"v2ray.com/core/common"
"v2ray.com/core/common/net"
"v2ray.com/core/common/protocol/tls/cert"
"v2ray.com/core/common/signal/done"
"v2ray.com/core/transport/internet"
"v2ray.com/core/transport/internet/tls"
)
// Listener is an internet.Listener that listens for TCP connections.
type Listener struct {
rawConn *sysConn
listener quic.Listener
done *done.Instance
addConn internet.ConnHandler
}
func (l *Listener) acceptStreams(session quic.Session) {
for {
stream, err := session.AcceptStream()
if err != nil {
newError("failed to accept stream").Base(err).WriteToLog()
select {
case <-session.Context().Done():
return
case <-l.done.Wait():
session.Close()
return
default:
time.Sleep(time.Second)
continue
}
}
conn := &interConn{
stream: stream,
local: session.LocalAddr(),
remote: session.RemoteAddr(),
}
l.addConn(conn)
}
}
func (l *Listener) keepAccepting() {
for {
conn, err := l.listener.Accept()
if err != nil {
newError("failed to accept QUIC sessions").Base(err).WriteToLog()
if l.done.Done() {
break
}
time.Sleep(time.Second)
continue
}
go l.acceptStreams(conn)
}
}
// Addr implements internet.Listener.Addr.
func (l *Listener) Addr() net.Addr {
return l.listener.Addr()
}
// Close implements internet.Listener.Close.
func (l *Listener) Close() error {
l.done.Close()
l.listener.Close()
l.rawConn.Close()
return nil
}
// Listen creates a new Listener based on configurations.
func Listen(ctx context.Context, address net.Address, port net.Port, streamSettings *internet.MemoryStreamConfig, handler internet.ConnHandler) (internet.Listener, error) {
if address.Family().IsDomain() {
return nil, newError("domain address is not allows for listening quic")
}
tlsConfig := tls.ConfigFromStreamSettings(streamSettings)
if tlsConfig == nil {
tlsConfig = &tls.Config{
Certificate: []*tls.Certificate{tls.ParseCertificate(cert.MustGenerate(nil, cert.DNSNames(internalDomain), cert.CommonName(internalDomain)))},
}
}
config := streamSettings.ProtocolSettings.(*Config)
rawConn, err := internet.ListenSystemPacket(context.Background(), &net.UDPAddr{
IP: address.IP(),
Port: int(port),
}, streamSettings.SocketSettings)
if err != nil {
return nil, err
}
quicConfig := &quic.Config{
ConnectionIDLength: 12,
HandshakeTimeout: time.Second * 8,
IdleTimeout: time.Second * 120,
MaxIncomingStreams: 256,
MaxIncomingUniStreams: -1,
}
conn, err := wrapSysConn(rawConn, config)
if err != nil {
conn.Close()
return nil, err
}
qListener, err := quic.Listen(conn, tlsConfig.GetTLSConfig(), quicConfig)
if err != nil {
conn.Close()
return nil, err
}
listener := &Listener{
done: done.New(),
rawConn: conn,
listener: qListener,
addConn: handler,
}
go listener.keepAccepting()
return listener, nil
}
func
|
() {
common.Must(internet.RegisterTransportListener(protocolName, Listen))
}
|
init
|
foo.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() {
}
|
//
|
block.py
|
"""CSC148 Assignment 2
=== CSC148 Winter 2020 ===
Department of Computer Science,
University of Toronto
This code is provided solely for the personal and private use of
students taking the CSC148 course at the University of Toronto.
Copying for purposes other than this use is expressly prohibited.
All forms of distribution of this code, whether as given or with
any changes, are expressly prohibited.
Authors: Diane Horton, David Liu, Mario Badr, Sophia Huynh, Misha Schwartz,
and Jaisie Sin
All of the files in this directory and all subdirectories are:
Copyright (c) Diane Horton, David Liu, Mario Badr, Sophia Huynh,
Misha Schwartz, and Jaisie Sin
=== Module Description ===
This file contains the Block class, the main data structure used in the game.
"""
from __future__ import annotations
from typing import Optional, Tuple, List
import random
import math
from settings import colour_name, COLOUR_LIST
def generate_board(max_depth: int, size: int) -> Block:
|
class Block:
"""A square Block in the Blocky game, represented as a tree.
In addition to its tree-related attributes, a Block also contains attributes
that describe how the Block appears on a Cartesian plane. All positions
describe the upper left corner (x, y), and the origin is at (0, 0). All
positions and sizes are in the unit of pixels.
When a block has four children, the order of its children impacts each
child's position. Indices 0, 1, 2, and 3 are the upper-right child,
upper-left child, lower-left child, and lower-right child, respectively.
=== Public Attributes ===
position:
The (x, y) coordinates of the upper left corner of this Block.
size:
The height and width of this square Block.
colour:
If this block is not subdivided, <colour> stores its colour. Otherwise,
<colour> is None.
level:
The level of this block within the overall block structure.
The outermost block, corresponding to the root of the tree,
is at level zero. If a block is at level i, its children are at
level i+1.
max_depth:
The deepest level allowed in the overall block structure.
children:
The blocks into which this block is subdivided. The children are
stored in this order: upper-right child, upper-left child,
lower-left child, lower-right child.
=== Representation Invariants===
- len(children) == 0 or len(children) == 4
- If this Block has children:
- their max_depth is the same as that of this Block.
- their size is half that of this Block.
- their level is one greater than that of this Block.
- their position is determined by the position and size of this Block,
and their index in this Block's list of children.
- this Block's colour is None.
- If this Block has no children:
- its colour is not None.
- level <= max_depth
"""
position: Tuple[int, int]
size: int
colour: Optional[Tuple[int, int, int]]
level: int
max_depth: int
children: List[Block]
def __init__(self, position: Tuple[int, int], size: int,
colour: Optional[Tuple[int, int, int]], level: int,
max_depth: int) -> None:
"""Initialize this block with <position>, dimensions <size> by <size>,
the given <colour>, at <level>, and with no children.
Preconditions:
- position[0] >= 0 and position[1] >= 0
- size > 0
- level >= 0
- max_depth >= level
"""
self.position = position
self.size = size
self.colour = colour
self.level = level
self.max_depth = max_depth
self.children = []
def __str__(self) -> str:
"""Return this Block in a string format.
>>> block = Block((0, 0), 750, (0, 0, 0), 0, 1)
>>> str(block)
'Leaf: colour=Black, pos=(0, 0), size=750, level=0\\n'
"""
if len(self.children) == 0:
indents = '\t' * self.level
colour = colour_name(self.colour)
return f'{indents}Leaf: colour={colour}, pos={self.position}, ' \
f'size={self.size}, level={self.level}\n'
else:
indents = '\t' * self.level
result = f'{indents}Parent: pos={self.position},' \
f'size={self.size}, level={self.level}\n'
for child in self.children:
result += str(child)
return result
def __eq__(self, other: Block) -> bool:
"""Return True iff this Block and all its descendents are equivalent to
the <other> Block and all its descendents.
"""
if len(self.children) == 0 and len(other.children) == 0:
# Both self and other are leaves.
return self.position == other.position and \
self.size == other.size and \
self.colour == other.colour and \
self.level == other.level and \
self.max_depth == other.max_depth
elif len(self.children) != len(other.children):
# One of self or other is a leaf while the other is not.
return False
else:
# Both self and other have four children.
for i in range(4):
# The != operator also uses the __eq__ special method.
if self.children[i] != other.children[i]:
return False
return True
def _child_size(self) -> int:
"""Return the size of this Block's children.
"""
return round(self.size / 2.0)
def _children_positions(self) -> List[Tuple[int, int]]:
"""Return the positions of this Block's four children.
The positions are returned in this order: upper-right child, upper-left
child, lower-left child, lower-right child.
"""
x = self.position[0]
y = self.position[1]
size = self._child_size()
return [(x + size, y), (x, y), (x, y + size), (x + size, y + size)]
def _update_children_positions(self, position: Tuple[int, int]) -> None:
"""Set the position of this Block to <position> and update all its
descendants to have positions consistent with this Block's.
<position> is the (x, y) coordinates of the upper-left corner of this
Block.
"""
if len(self.children) == 0:
self.position = position[0], position[1]
else:
self.position = position[0], position[1]
for i in range(4):
self.children[i]._update_children_positions(
self._children_positions()[i])
def smashable(self) -> bool:
"""Return True iff this block can be smashed.
A block can be smashed if it has no children and its level is not at
max_depth.
"""
return self.level != self.max_depth and len(self.children) == 0
def smash(self) -> bool:
"""Sub-divide this block so that it has four randomly generated
children.
If this Block's level is <max_depth>, do nothing. If this block has
children, do nothing.
Return True iff the smash was performed.
"""
if not self.smashable():
return False
else:
self.colour = None
for i in range(4):
position = self._children_positions()[i]
size = self._child_size()
colour = COLOUR_LIST[random.randint(0, len(COLOUR_LIST)-1)]
level = self.level + 1
child = Block(position, size, colour, level, self.max_depth)
self.children.append(child)
for child in self.children:
subdivide = random.random()
if subdivide < math.exp(-0.25 * child.level):
child.smash()
return True
def swap(self, direction: int) -> bool:
"""Swap the child Blocks of this Block.
If this Block has no children, do nothing. Otherwise, if <direction> is
1, swap vertically. If <direction> is 0, swap horizontally.
Return True iff the swap was performed.
Precondition: <direction> is either 0 or 1
"""
if len(self.children) == 0:
return False
else:
# store original positions
positions = self._children_positions()
if direction == 1:
# vertically swapping the children in self.children
            self.children[1], self.children[2] = self.children[2], \
                self.children[1]
self.children[0], self.children[3] = self.children[3], \
self.children[0]
else:
# horizontally swapping the children in self.children
self.children[1], self.children[0] = self.children[0], \
self.children[1]
self.children[2], self.children[3] = self.children[3], \
self.children[2]
# updating children's positions
self.children[0]._update_children_positions(positions[0])
self.children[1]._update_children_positions(positions[1])
self.children[2]._update_children_positions(positions[2])
self.children[3]._update_children_positions(positions[3])
return True
def rotate(self, direction: int) -> bool:
"""Rotate this Block and all its descendants.
If this Block has no children, do nothing. If <direction> is 1, rotate
clockwise. If <direction> is 3, rotate counter-clockwise.
Return True iff the rotate was performed.
Precondition: <direction> is either 1 or 3.
"""
if len(self.children) == 0:
return False
elif direction == 1:
# rotate clockwise
positions = self._children_positions()
for i in range(4):
self.children[i].rotate(direction)
self.children[i]._update_children_positions(positions[i - 1])
self.children[0], self.children[1], self.children[2], \
self.children[3] = self.children[1], self.children[2], \
self.children[3], self.children[0]
return True
else:
# rotate counterclockwise
positions = self._children_positions()
for i in range(4):
self.children[i].rotate(direction)
self.children[i]._update_children_positions(positions[i - 3])
self.children[0], self.children[1], self.children[2], \
self.children[3] = self.children[3], self.children[0], \
self.children[1], self.children[2]
return True
def paint(self, colour: Tuple[int, int, int]) -> bool:
"""Change this Block's colour iff it is a leaf at a level of max_depth
and its colour is different from <colour>.
Return True iff this Block's colour was changed.
"""
if len(self.children) == 0 and self.level == self.max_depth and \
self.colour != colour:
self.colour = colour
return True
return False
def combine(self) -> bool:
"""Turn this Block into a leaf based on the majority colour of its
children.
The majority colour is the colour with the most child blocks of that
colour. A tie does not constitute a majority (e.g., if there are two red
children and two blue children, then there is no majority colour).
If there is no majority colour, do nothing. If this block is not at a
level of max_depth - 1, or this block has no children, do nothing.
Return True iff this Block was turned into a leaf node.
"""
if self.level != self.max_depth - 1 or len(self.children) == 0:
return False
else:
majority_colour = []
colours = []
for child in self.children:
colours.append(child.colour)
for colour in colours:
if colours.count(colour) >= 2 and colour not in majority_colour:
majority_colour.append(colour)
if len(majority_colour) == 1:
self.children = []
self.colour = majority_colour[0]
return True
return False
def create_copy(self) -> Block:
"""Return a new Block that is a deep copy of this Block.
Remember that a deep copy has new blocks (not aliases) at every level.
"""
if len(self.children) == 0:
return Block(self.position, self.size, self.colour, self.level,
self.max_depth)
else:
copy = Block(self.position, self.size, self.colour, self.level,
self.max_depth)
for child in self.children:
copy.children.append(child.create_copy())
return copy
if __name__ == '__main__':
import python_ta
python_ta.check_all(config={
'allowed-import-modules': [
'doctest', 'python_ta', 'random', 'typing', '__future__', 'math',
'settings'
],
'max-attributes': 15,
'max-args': 6
})
# This is a board consisting of only one block.
b1 = Block((0, 0), 750, COLOUR_LIST[0], 0, 1)
print("=== tiny board ===")
print(b1)
# Now let's make a random board.
b2 = generate_board(3, 750)
print("\n=== random board ===")
print(b2)
|
"""Return a new game board with a depth of <max_depth> and dimensions of
<size> by <size>.
>>> board = generate_board(3, 750)
>>> board.max_depth
3
>>> board.size
750
>>> len(board.children) == 4
True
"""
board = Block((0, 0), size, random.choice(COLOUR_LIST), 0, max_depth)
board.smash()
return board
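# Illustrative usage sketch (not part of the original assignment file):
# assuming block.py is importable as a module, the tree operations above
# compose as follows; random.seed keeps the smash pattern deterministic.
import random

from block import generate_board

random.seed(148)
board = generate_board(max_depth=2, size=512)

snapshot = board.create_copy()                # deep copy: fresh nodes at every level
print(board == snapshot, board is snapshot)   # True False

board.swap(0)      # horizontal swap of the four children
board.rotate(1)    # clockwise rotation, recursing into descendants
print(board.children[0].position)             # (256, 0): index 0 stays the upper-right quadrant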
|
set_request_builder.go
|
package set
import (
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go"
i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459 "github.com/microsoftgraph/msgraph-beta-sdk-go/models/odataerrors"
i45fc41673b99130d86c1854da651a8f416ed902eef3acbecd5738f9ef72690a8 "github.com/microsoftgraph/msgraph-beta-sdk-go/models/termstore"
)
// SetRequestBuilder provides operations to manage the set property of the microsoft.graph.termStore.term entity.
type SetRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string
// The request adapter to use to execute the requests.
requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter
// Url template to use to build the URL for the current request builder
urlTemplate string
}
// SetRequestBuilderGetQueryParameters the [set] in which the term is created.
type SetRequestBuilderGetQueryParameters struct {
// Expand related entities
Expand []string `uriparametername:"%24expand"`
// Select properties to be returned
Select []string `uriparametername:"%24select"`
}
// SetRequestBuilderGetRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type SetRequestBuilderGetRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
// Request query parameters
QueryParameters *SetRequestBuilderGetQueryParameters
}
// NewSetRequestBuilderInternal instantiates a new SetRequestBuilder and sets the default values.
func NewSetRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*SetRequestBuilder)
|
// NewSetRequestBuilder instantiates a new SetRequestBuilder and sets the default values.
func NewSetRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*SetRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewSetRequestBuilderInternal(urlParams, requestAdapter)
}
// CreateGetRequestInformation the [set] in which the term is created.
func (m *SetRequestBuilder) CreateGetRequestInformation()(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
return m.CreateGetRequestInformationWithRequestConfiguration(nil);
}
// CreateGetRequestInformationWithRequestConfiguration the [set] in which the term is created.
func (m *SetRequestBuilder) CreateGetRequestInformationWithRequestConfiguration(requestConfiguration *SetRequestBuilderGetRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.GET
if requestConfiguration != nil {
if requestConfiguration.QueryParameters != nil {
requestInfo.AddQueryParameters(*(requestConfiguration.QueryParameters))
}
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// Get the [set] in which the term is created.
func (m *SetRequestBuilder) Get()(i45fc41673b99130d86c1854da651a8f416ed902eef3acbecd5738f9ef72690a8.Setable, error) {
return m.GetWithRequestConfigurationAndResponseHandler(nil, nil);
}
// GetWithRequestConfigurationAndResponseHandler the [set] in which the term is created.
func (m *SetRequestBuilder) GetWithRequestConfigurationAndResponseHandler(requestConfiguration *SetRequestBuilderGetRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(i45fc41673b99130d86c1854da651a8f416ed902eef3acbecd5738f9ef72690a8.Setable, error) {
requestInfo, err := m.CreateGetRequestInformationWithRequestConfiguration(requestConfiguration);
if err != nil {
return nil, err
}
errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {
"4XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
"5XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, i45fc41673b99130d86c1854da651a8f416ed902eef3acbecd5738f9ef72690a8.CreateSetFromDiscriminatorValue, responseHandler, errorMapping)
if err != nil {
return nil, err
}
return res.(i45fc41673b99130d86c1854da651a8f416ed902eef3acbecd5738f9ef72690a8.Setable), nil
}
|
{
m := &SetRequestBuilder{
}
m.urlTemplate = "{+baseurl}/users/{user%2Did}/joinedGroups/{group%2Did}/sites/{site%2Did}/termStore/groups/{group%2Did1}/sets/{set%2Did}/children/{term%2Did}/children/{term%2Did1}/set{?%24select,%24expand}";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = urlTplParams;
m.requestAdapter = requestAdapter;
return m
}
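// Usage sketch (illustrative, not generated code): given a configured
// request adapter from the msgraph-beta SDK, the builder fetches the parent
// set of a nested term; Select/Expand are optional OData query parameters,
// and GetId is assumed here to come from the generated entity models.
//
//	builder := NewSetRequestBuilder(rawUrl, adapter)
//	set, err := builder.GetWithRequestConfigurationAndResponseHandler(
//		&SetRequestBuilderGetRequestConfiguration{
//			QueryParameters: &SetRequestBuilderGetQueryParameters{
//				Select: []string{"id", "localizedNames"},
//			},
//		}, nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("set id: %v\n", *set.GetId())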
|
formanggota.js
|
function validateForm() {
var nmlengkap = document.forms["myForm"]["nmlengkap"].value;
var notelp = document.forms["myForm"]["notelp"].value;
var tempat = document.forms["myForm"]["tempat"].value;
var tgllahir = document.forms["myForm"]["tgllahir"].value;
var umur = document.forms["myForm"]["umur"].value;
var alamat = document.forms["myForm"]["alamat"].value;
    if (nmlengkap == "") {
        validasi('Full name is required!', 'warning');
        return false;
    } else if (tempat == '') {
        validasi('Place of birth is required!', 'warning');
        return false;
    } else if (notelp == '') {
        validasi('Phone number is required!', 'warning');
        return false;
    } else if (tgllahir == '') {
        validasi('Date of birth is required!', 'warning');
        return false;
    } else if (umur == '') {
        validasi('Age is required!', 'warning');
        return false;
    } else if (alamat == '') {
        validasi('Address is required!', 'warning');
        return false;
    }
}
function validasi(judul, status) {
swal.fire({
title: judul,
icon: status,
confirmButtonColor: '#4e73df',
});
}
function fileIsValid(fileName) {
var ext = fileName.match(/\.([^\.]+)$/)[1];
ext = ext.toLowerCase();
var isValid = true;
switch (ext) {
case 'png':
case 'jpeg':
case 'jpg':
case 'tiff':
case 'gif':
case 'tif':
case 'pdf':
break;
        default:
            isValid = false;
}
return isValid;
}
function
|
() {
var file = document.getElementById('GetFile').files[0];
if (file != null) {
var fileName = file.name;
if (fileIsValid(fileName) == false) {
            validasi('File type is not allowed!', 'warning');
document.getElementById('GetFile').value = null;
return false;
}
var content;
var size = file.size;
if ((size != null) && ((size / (1024 * 1024)) > 3)) {
            validasi('Maximum file size is 3 MB!', 'warning');
document.getElementById('GetFile').value = null;
return false;
}
var ext = fileName.match(/\.([^\.]+)$/)[1];
ext = ext.toLowerCase();
if (ext == 'pdf') {
$('#pdf').show();
$('#img').hide();
$(".custom-file-label").addClass("selected").html(file.name);
document.getElementById('outputPdf').src = window.URL.createObjectURL(file);
} else {
$('#pdf').hide();
$('#img').show();
$(".custom-file-label").addClass("selected").html(file.name);
document.getElementById('outputImg').src = window.URL.createObjectURL(file);
}
return true;
} else
return false;
}
|
VerifyFileNameAndFileSize
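// Usage sketch (illustrative, not part of the original file): the validators
// above assume markup along these lines, with SweetAlert2 loaded for the
// `swal.fire` calls:
//
//   <form name="myForm" onsubmit="return validateForm()"> ... </form>
//   <input type="file" id="GetFile" onchange="VerifyFileNameAndFileSize()">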
|
main.go
|
// Copyright 2018 David Sansome
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"log"
"github.com/davidsansome/tsurukame/api"
"github.com/davidsansome/tsurukame/converter"
"github.com/davidsansome/tsurukame/encoding"
"github.com/davidsansome/tsurukame/jsonapi"
"github.com/davidsansome/tsurukame/utils"
)
var (
out = flag.String("out", "data", "Output directory")
cookie = flag.String("cookie", "", "Wanikani HTTP cookie")
apiToken = flag.String("api-token", "", "Wanikani API v2 token")
)
func main()
|
type Scraper struct {
apiClient *api.Client
jsonClient *jsonapi.Client
directory encoding.ReadWriter
}
func (s *Scraper) GetAll() error {
cur := s.apiClient.Subjects("")
SubjectLoop:
for {
subject, err := cur.Next()
if err != nil {
return err
}
if subject == nil {
break SubjectLoop
}
if subject.Data.HiddenAt != "" {
continue
}
spb, err := converter.SubjectToProto(subject)
if err != nil {
return err
}
// Fetch the other bits. We only need to do this for Radicals now that
// more data is included in the real WaniKani API.
if spb.Radical != nil {
if s.directory.HasSubject(subject.ID) {
continue SubjectLoop
}
r, err := s.jsonClient.GetRadical(subject.ID)
if err != nil {
log.Printf("Error getting radical %d: %v", subject.ID, err)
continue SubjectLoop
}
converter.AddRadical(spb, r)
}
// Write it to a file.
if err := s.directory.WriteSubject(subject.ID, spb); err != nil {
return err
}
}
return nil
}
|
{
flag.Parse()
// Create API clients.
apiClient, err := api.New(*apiToken)
utils.Must(err)
jsonClient, err := jsonapi.New(*cookie)
utils.Must(err)
// Open directory.
directory, err := encoding.OpenDirectory(*out)
utils.Must(err)
s := Scraper{apiClient, jsonClient, directory}
if err := s.GetAll(); err != nil {
panic(err)
}
}
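// Usage sketch (illustrative): the scraper is driven entirely by flags, e.g.
//
//	go run . -out data -api-token $WANIKANI_TOKEN -cookie $WANIKANI_COOKIE
//
// Subjects stream from the v2 API cursor; only radicals still hit the
// undocumented JSON endpoint, and radicals already present in the output
// directory are skipped, so interrupted runs can resume those cheaply.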
|
_gen_defs_linux_64.py
|
# Automatically generated from system headers.
# DO NOT EDIT.
import ctypes
from .syscalldef import CType, SysCallSig, SysCallParamSig
PTRACE_TRACEME = 0
PTRACE_PEEKTEXT = 1
PTRACE_PEEKDATA = 2
PTRACE_PEEKUSER = 3
PTRACE_POKETEXT = 4
PTRACE_POKEDATA = 5
PTRACE_POKEUSER = 6
PTRACE_CONT = 7
PTRACE_KILL = 8
PTRACE_SINGLESTEP = 9
PTRACE_GETREGS = 12
PTRACE_SETREGS = 13
PTRACE_GETFPREGS = 14
PTRACE_SETFPREGS = 15
PTRACE_ATTACH = 16
PTRACE_DETACH = 17
PTRACE_GETFPXREGS = 18
PTRACE_SETFPXREGS = 19
PTRACE_SYSCALL = 24
PTRACE_SETOPTIONS = 0x4200
PTRACE_GETEVENTMSG = 0x4201
PTRACE_GETSIGINFO = 0x4202
PTRACE_SETSIGINFO = 0x4203
PTRACE_GETREGSET = 0x4204
PTRACE_SETREGSET = 0x4205
PTRACE_SEIZE = 0x4206
PTRACE_INTERRUPT = 0x4207
PTRACE_LISTEN = 0x4208
PTRACE_PEEKSIGINFO = 0x4209
PTRACE_GETSIGMASK = 0x420a
PTRACE_SETSIGMASK = 0x420b
PTRACE_SECCOMP_GET_FILTER = 0x420c
PTRACE_SEIZE_DEVEL = 0x80000000
PTRACE_O_TRACESYSGOOD = 0x00000001
PTRACE_O_TRACEFORK = 0x00000002
PTRACE_O_TRACEVFORK = 0x00000004
PTRACE_O_TRACECLONE = 0x00000008
PTRACE_O_TRACEEXEC = 0x00000010
PTRACE_O_TRACEVFORKDONE = 0x00000020
PTRACE_O_TRACEEXIT = 0x00000040
PTRACE_O_TRACESECCOMP = 0x00000080
PTRACE_O_EXITKILL = 0x00100000
PTRACE_O_SUSPEND_SECCOMP = 0x00200000
PTRACE_O_MASK = 0x003000ff
PTRACE_EVENT_FORK = 1
PTRACE_EVENT_VFORK = 2
PTRACE_EVENT_CLONE = 3
PTRACE_EVENT_EXEC = 4
PTRACE_EVENT_VFORK_DONE = 5
PTRACE_EVENT_EXIT = 6
PTRACE_EVENT_SECCOMP = 7
PTRACE_PEEKSIGINFO_SHARED = 1 << 0
class __ptrace_peeksiginfo_args(ctypes.Structure):
_fields_ = (
('off', ctypes.c_ulong),
('flags', ctypes.c_uint),
('nr', ctypes.c_int),
)
class user_fpregs_struct(ctypes.Structure):
_fields_ = (
('cwd', ctypes.c_ushort),
('swd', ctypes.c_ushort),
('ftw', ctypes.c_ushort),
('fop', ctypes.c_ushort),
('rip', ctypes.c_ulonglong),
('rdp', ctypes.c_ulonglong),
('mxcsr', ctypes.c_uint),
('mxcr_mask', ctypes.c_uint),
('st_space', ctypes.c_uint * 32),
('xmm_space', ctypes.c_uint * 64),
('padding', ctypes.c_uint * 24),
)
class user_regs_struct(ctypes.Structure):
_fields_ = (
('r15', ctypes.c_ulonglong),
('r14', ctypes.c_ulonglong),
('r13', ctypes.c_ulonglong),
('r12', ctypes.c_ulonglong),
('rbp', ctypes.c_ulonglong),
('rbx', ctypes.c_ulonglong),
('r11', ctypes.c_ulonglong),
('r10', ctypes.c_ulonglong),
('r9', ctypes.c_ulonglong),
('r8', ctypes.c_ulonglong),
('rax', ctypes.c_ulonglong),
('rcx', ctypes.c_ulonglong),
('rdx', ctypes.c_ulonglong),
('rsi', ctypes.c_ulonglong),
('rdi', ctypes.c_ulonglong),
('orig_rax', ctypes.c_ulonglong),
('rip', ctypes.c_ulonglong),
('cs', ctypes.c_ulonglong),
('eflags', ctypes.c_ulonglong),
('rsp', ctypes.c_ulonglong),
('ss', ctypes.c_ulonglong),
('fs_base', ctypes.c_ulonglong),
('gs_base', ctypes.c_ulonglong),
('ds', ctypes.c_ulonglong),
('es', ctypes.c_ulonglong),
('fs', ctypes.c_ulonglong),
('gs', ctypes.c_ulonglong),
)
class _anon_2(ctypes.Structure):
_fields_ = (
('si_pid', ctypes.c_int),
('si_uid', ctypes.c_uint),
)
class _anon_3(ctypes.Structure):
_fields_ = (
('si_tid', ctypes.c_int),
('si_overrun', ctypes.c_int),
('si_sigval', ctypes.c_void_p),
)
class _anon_4(ctypes.Structure):
_fields_ = (
('si_pid', ctypes.c_int),
('si_uid', ctypes.c_uint),
('si_sigval', ctypes.c_void_p),
)
class _anon_5(ctypes.Structure):
_fields_ = (
('si_pid', ctypes.c_int),
('si_uid', ctypes.c_uint),
('si_status', ctypes.c_int),
('si_utime', ctypes.c_long),
('si_stime', ctypes.c_long),
)
class _anon_7(ctypes.Structure):
_fields_ = (
('_lower', ctypes.c_void_p),
('_upper', ctypes.c_void_p),
)
class _anon_6(ctypes.Structure):
_fields_ = (
('si_addr', ctypes.c_void_p),
('si_addr_lsb', ctypes.c_short),
('si_addr_bnd', _anon_7),
)
class _anon_8(ctypes.Structure):
|
class _anon_9(ctypes.Structure):
_fields_ = (
('_call_addr', ctypes.c_void_p),
('_syscall', ctypes.c_int),
('_arch', ctypes.c_uint),
)
class _anon_1(ctypes.Union):
_fields_ = (
('_pad', ctypes.c_int * 28),
('_kill', _anon_2),
('_timer', _anon_3),
('_rt', _anon_4),
('_sigchld', _anon_5),
('_sigfault', _anon_6),
('_sigpoll', _anon_8),
('_sigsys', _anon_9),
)
class siginfo_t(ctypes.Structure):
_fields_ = (
('si_signo', ctypes.c_int),
('si_errno', ctypes.c_int),
('si_code', ctypes.c_int),
('_sifields', _anon_1),
)
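# Usage sketch (illustrative, not generated): the ctypes mirrors above line
# up with the ptrace(2) interface, e.g. reading a stopped tracee's registers
# through glibc's ptrace wrapper (pid is a placeholder for a traced process):
#
#     import ctypes
#     libc = ctypes.CDLL("libc.so.6", use_errno=True)
#     libc.ptrace.restype = ctypes.c_long
#     regs = user_regs_struct()
#     libc.ptrace(PTRACE_GETREGS, pid, None, ctypes.byref(regs))
#     print(hex(regs.rip))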
SYSCALLS = {
'time': SysCallSig(
'time',
params=[
SysCallParamSig(
'tloc',
CType(
['time_t', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'stime': SysCallSig(
'stime',
params=[
SysCallParamSig(
'tptr',
CType(
['time_t', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'gettimeofday': SysCallSig(
'gettimeofday',
params=[
SysCallParamSig(
'tv',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'tz',
CType(
['struct', 'timezone', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'settimeofday': SysCallSig(
'settimeofday',
params=[
SysCallParamSig(
'tv',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'tz',
CType(
['struct', 'timezone', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'adjtimex': SysCallSig(
'adjtimex',
params=[
SysCallParamSig(
'txc_p',
CType(
['struct', 'timex', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'times': SysCallSig(
'times',
params=[
SysCallParamSig(
'tbuf',
CType(
['struct', 'tms', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'gettid': SysCallSig(
'gettid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'nanosleep': SysCallSig(
'nanosleep',
params=[
SysCallParamSig(
'rqtp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'rmtp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'alarm': SysCallSig(
'alarm',
params=[
SysCallParamSig(
'seconds',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpid': SysCallSig(
'getpid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getppid': SysCallSig(
'getppid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getuid': SysCallSig(
'getuid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'geteuid': SysCallSig(
'geteuid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getgid': SysCallSig(
'getgid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getegid': SysCallSig(
'getegid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getresuid': SysCallSig(
'getresuid',
params=[
SysCallParamSig(
'ruid',
CType(
['uid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'euid',
CType(
['uid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'suid',
CType(
['uid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getresgid': SysCallSig(
'getresgid',
params=[
SysCallParamSig(
'rgid',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'egid',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'sgid',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpgid': SysCallSig(
'getpgid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpgrp': SysCallSig(
'getpgrp',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getsid': SysCallSig(
'getsid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getgroups': SysCallSig(
'getgroups',
params=[
SysCallParamSig(
'gidsetsize',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'grouplist',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setregid': SysCallSig(
'setregid',
params=[
SysCallParamSig(
'rgid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'egid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setgid': SysCallSig(
'setgid',
params=[
SysCallParamSig(
'gid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setreuid': SysCallSig(
'setreuid',
params=[
SysCallParamSig(
'ruid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'euid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setuid': SysCallSig(
'setuid',
params=[
SysCallParamSig(
'uid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setresuid': SysCallSig(
'setresuid',
params=[
SysCallParamSig(
'ruid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'euid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'suid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setresgid': SysCallSig(
'setresgid',
params=[
SysCallParamSig(
'rgid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'egid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'sgid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setfsuid': SysCallSig(
'setfsuid',
params=[
SysCallParamSig(
'uid',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setfsgid': SysCallSig(
'setfsgid',
params=[
SysCallParamSig(
'gid',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setpgid': SysCallSig(
'setpgid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pgid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setsid': SysCallSig(
'setsid',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setgroups': SysCallSig(
'setgroups',
params=[
SysCallParamSig(
'gidsetsize',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'grouplist',
CType(
['gid_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'acct': SysCallSig(
'acct',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'capget': SysCallSig(
'capget',
params=[
SysCallParamSig(
'header',
CType(
['cap_user_header_t'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'dataptr',
CType(
['cap_user_data_t'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'capset': SysCallSig(
'capset',
params=[
SysCallParamSig(
'header',
CType(
['cap_user_header_t'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'data',
CType(
['const', 'cap_user_data_t'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'personality': SysCallSig(
'personality',
params=[
SysCallParamSig(
'personality',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sigpending': SysCallSig(
'sigpending',
params=[
SysCallParamSig(
'set',
CType(
['old_sigset_t', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sigprocmask': SysCallSig(
'sigprocmask',
params=[
SysCallParamSig(
'how',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'set',
CType(
['old_sigset_t', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'oset',
CType(
['old_sigset_t', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sigaltstack': SysCallSig(
'sigaltstack',
params=[
SysCallParamSig(
'uss',
CType(
['const', 'struct', 'sigaltstack', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uoss',
CType(
['struct', 'sigaltstack', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getitimer': SysCallSig(
'getitimer',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'value',
CType(
['struct', 'itimerval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setitimer': SysCallSig(
'setitimer',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'value',
CType(
['struct', 'itimerval', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'ovalue',
CType(
['struct', 'itimerval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_create': SysCallSig(
'timer_create',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'timer_event_spec',
CType(
['struct', 'sigevent', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'created_timer_id',
CType(
['timer_t', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_gettime': SysCallSig(
'timer_gettime',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'setting',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_getoverrun': SysCallSig(
'timer_getoverrun',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_settime': SysCallSig(
'timer_settime',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'new_setting',
CType(
['const', 'struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'old_setting',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timer_delete': SysCallSig(
'timer_delete',
params=[
SysCallParamSig(
'timer_id',
CType(
['timer_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_settime': SysCallSig(
'clock_settime',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tp',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_gettime': SysCallSig(
'clock_gettime',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_adjtime': SysCallSig(
'clock_adjtime',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tx',
CType(
['struct', 'timex', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_getres': SysCallSig(
'clock_getres',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'tp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clock_nanosleep': SysCallSig(
'clock_nanosleep',
params=[
SysCallParamSig(
'which_clock',
CType(
['clockid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'rqtp',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'rmtp',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'nice': SysCallSig(
'nice',
params=[
SysCallParamSig(
'increment',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setscheduler': SysCallSig(
'sched_setscheduler',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'policy',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'param',
CType(
['struct', 'sched_param', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setparam': SysCallSig(
'sched_setparam',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'param',
CType(
['struct', 'sched_param', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setattr': SysCallSig(
'sched_setattr',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'attr',
CType(
['struct', 'sched_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getscheduler': SysCallSig(
'sched_getscheduler',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getparam': SysCallSig(
'sched_getparam',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'param',
CType(
['struct', 'sched_param', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getattr': SysCallSig(
'sched_getattr',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'attr',
CType(
['struct', 'sched_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_setaffinity': SysCallSig(
'sched_setaffinity',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'user_mask_ptr',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_getaffinity': SysCallSig(
'sched_getaffinity',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'user_mask_ptr',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_yield': SysCallSig(
'sched_yield',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_get_priority_max': SysCallSig(
'sched_get_priority_max',
params=[
SysCallParamSig(
'policy',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_get_priority_min': SysCallSig(
'sched_get_priority_min',
params=[
SysCallParamSig(
'policy',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sched_rr_get_interval': SysCallSig(
'sched_rr_get_interval',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'interval',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setpriority': SysCallSig(
'setpriority',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'niceval',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpriority': SysCallSig(
'getpriority',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shutdown': SysCallSig(
'shutdown',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'reboot': SysCallSig(
'reboot',
params=[
SysCallParamSig(
'magic1',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'magic2',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'arg',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'restart_syscall': SysCallSig(
'restart_syscall',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kexec_load': SysCallSig(
'kexec_load',
params=[
SysCallParamSig(
'entry',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'nr_segments',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'segments',
CType(
['struct', 'kexec_segment', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kexec_file_load': SysCallSig(
'kexec_file_load',
params=[
SysCallParamSig(
'kernel_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'initrd_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmdline_len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'cmdline_ptr',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'exit': SysCallSig(
'exit',
params=[
SysCallParamSig(
'error_code',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'exit_group': SysCallSig(
'exit_group',
params=[
SysCallParamSig(
'error_code',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'wait4': SysCallSig(
'wait4',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'stat_addr',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'options',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ru',
CType(
['struct', 'rusage', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'waitid': SysCallSig(
'waitid',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'infop',
CType(
['struct', 'siginfo', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'options',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ru',
CType(
['struct', 'rusage', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'waitpid': SysCallSig(
'waitpid',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'stat_addr',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'options',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'set_tid_address': SysCallSig(
'set_tid_address',
params=[
SysCallParamSig(
'tidptr',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'futex': SysCallSig(
'futex',
params=[
SysCallParamSig(
'uaddr',
CType(
['u32', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'op',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'val',
CType(
['u32'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'utime',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uaddr2',
CType(
['u32', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'val3',
CType(
['u32'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'init_module': SysCallSig(
'init_module',
params=[
SysCallParamSig(
'umod',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'uargs',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'delete_module': SysCallSig(
'delete_module',
params=[
SysCallParamSig(
'name_user',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigsuspend': SysCallSig(
'rt_sigsuspend',
params=[
SysCallParamSig(
'unewset',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigaction': SysCallSig(
'rt_sigaction',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['const', 'struct', 'sigaction', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sigaction', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigprocmask': SysCallSig(
'rt_sigprocmask',
params=[
SysCallParamSig(
'how',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'set',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'oset',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigpending': SysCallSig(
'rt_sigpending',
params=[
SysCallParamSig(
'set',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigtimedwait': SysCallSig(
'rt_sigtimedwait',
params=[
SysCallParamSig(
'uthese',
CType(
['const', 'sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uinfo',
CType(
['siginfo_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'uts',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_tgsigqueueinfo': SysCallSig(
'rt_tgsigqueueinfo',
params=[
SysCallParamSig(
'tgid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'uinfo',
CType(
['siginfo_t', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kill': SysCallSig(
'kill',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'tgkill': SysCallSig(
'tgkill',
params=[
SysCallParamSig(
'tgid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'tkill': SysCallSig(
'tkill',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rt_sigqueueinfo': SysCallSig(
'rt_sigqueueinfo',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'uinfo',
CType(
['siginfo_t', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sgetmask': SysCallSig(
'sgetmask',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ssetmask': SysCallSig(
'ssetmask',
params=[
SysCallParamSig(
'newmask',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'signal': SysCallSig(
'signal',
params=[
SysCallParamSig(
'sig',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'handler',
CType(
['__sighandler_t'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pause': SysCallSig(
'pause',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sync': SysCallSig(
'sync',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fsync': SysCallSig(
'fsync',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fdatasync': SysCallSig(
'fdatasync',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'bdflush': SysCallSig(
'bdflush',
params=[
SysCallParamSig(
'func',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'data',
CType(
['long'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mount': SysCallSig(
'mount',
params=[
SysCallParamSig(
'dev_name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'dir_name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'type',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'data',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'umount': SysCallSig(
'umount',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'oldumount': SysCallSig(
'oldumount',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'truncate': SysCallSig(
'truncate',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'length',
CType(
['long'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ftruncate': SysCallSig(
'ftruncate',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'length',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'stat': SysCallSig(
'stat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', '__old_kernel_stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'statfs': SysCallSig(
'statfs',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'statfs64': SysCallSig(
'statfs64',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'sz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs64', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fstatfs': SysCallSig(
'fstatfs',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fstatfs64': SysCallSig(
'fstatfs64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'sz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'statfs64', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lstat': SysCallSig(
'lstat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', '__old_kernel_stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fstat': SysCallSig(
'fstat',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', '__old_kernel_stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newstat': SysCallSig(
'newstat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newlstat': SysCallSig(
'newlstat',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newfstat': SysCallSig(
'newfstat',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ustat': SysCallSig(
'ustat',
params=[
SysCallParamSig(
'dev',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'ubuf',
CType(
['struct', 'ustat', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setxattr': SysCallSig(
'setxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lsetxattr': SysCallSig(
'lsetxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fsetxattr': SysCallSig(
'fsetxattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getxattr': SysCallSig(
'getxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lgetxattr': SysCallSig(
'lgetxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fgetxattr': SysCallSig(
'fgetxattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'value',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'listxattr': SysCallSig(
'listxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'list',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'llistxattr': SysCallSig(
'llistxattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'list',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'flistxattr': SysCallSig(
'flistxattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'list',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'removexattr': SysCallSig(
'removexattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lremovexattr': SysCallSig(
'lremovexattr',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fremovexattr': SysCallSig(
'fremovexattr',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'brk': SysCallSig(
'brk',
params=[
SysCallParamSig(
'brk',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mprotect': SysCallSig(
'mprotect',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mremap': SysCallSig(
'mremap',
params=[
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'old_len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'new_len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'new_addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'remap_file_pages': SysCallSig(
'remap_file_pages',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pgoff',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msync': SysCallSig(
'msync',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fadvise64': SysCallSig(
'fadvise64',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'advice',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fadvise64_64': SysCallSig(
'fadvise64_64',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'len',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'advice',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'munmap': SysCallSig(
'munmap',
params=[
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mlock': SysCallSig(
'mlock',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'munlock': SysCallSig(
'munlock',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mlockall': SysCallSig(
'mlockall',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'munlockall': SysCallSig(
'munlockall',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'madvise': SysCallSig(
'madvise',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'behavior',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mincore': SysCallSig(
'mincore',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'vec',
CType(
['unsigned', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
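    # -------- filesystem root switching (pivot_root, chroot) and classic path
    # operations: mknod, link/symlink/unlink, rename, chmod/fchmod.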
'pivot_root': SysCallSig(
'pivot_root',
params=[
SysCallParamSig(
'new_root',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'put_old',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chroot': SysCallSig(
'chroot',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mknod': SysCallSig(
'mknod',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'dev',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'link': SysCallSig(
'link',
params=[
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'symlink': SysCallSig(
'symlink',
params=[
SysCallParamSig(
'old',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'new',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'unlink': SysCallSig(
'unlink',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rename': SysCallSig(
'rename',
params=[
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chmod': SysCallSig(
'chmod',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchmod': SysCallSig(
'fchmod',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
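    # -------- file-descriptor control and duplication (fcntl, pipe/pipe2,
    # dup/dup2/dup3).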
'fcntl': SysCallSig(
'fcntl',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'arg',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pipe': SysCallSig(
'pipe',
params=[
SysCallParamSig(
'fildes',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pipe2': SysCallSig(
'pipe2',
params=[
SysCallParamSig(
'fildes',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'dup': SysCallSig(
'dup',
params=[
SysCallParamSig(
'fildes',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'dup2': SysCallSig(
'dup2',
params=[
SysCallParamSig(
'oldfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'newfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'dup3': SysCallSig(
'dup3',
params=[
SysCallParamSig(
'oldfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'newfd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
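    # -------- I/O port permissions and device control (ioperm, ioctl) plus
    # BSD-style flock.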
'ioperm': SysCallSig(
'ioperm',
params=[
SysCallParamSig(
'from',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'num',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'on',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioctl': SysCallSig(
'ioctl',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'arg',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'flock': SysCallSig(
'flock',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
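    # -------- kernel asynchronous I/O (io_setup .. io_cancel).
    # Unnamed kernel prototype parameters surface in this table as name=None;
    # the '__foo' name on io_submit's iocb-array argument looks like a
    # header-parsing artifact rather than a real kernel parameter name.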
'io_setup': SysCallSig(
'io_setup',
params=[
SysCallParamSig(
'nr_reqs',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'ctx',
CType(
['aio_context_t', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_destroy': SysCallSig(
'io_destroy',
params=[
SysCallParamSig(
'ctx',
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_getevents': SysCallSig(
'io_getevents',
params=[
SysCallParamSig(
'ctx_id',
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'min_nr',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'nr',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'events',
CType(
['struct', 'io_event', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'timeout',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_submit': SysCallSig(
'io_submit',
params=[
SysCallParamSig(
None,
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'__foo',
CType(
['struct', 'iocb', '*', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'io_cancel': SysCallSig(
'io_cancel',
params=[
SysCallParamSig(
'ctx_id',
CType(
['aio_context_t'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'iocb',
CType(
['struct', 'iocb', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'result',
CType(
['struct', 'io_event', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
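    # -------- sendfile variants and basic path/file syscalls (readlink, creat,
    # open, close, access, vhangup, the chown family, utime/utimes).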
'sendfile': SysCallSig(
'sendfile',
params=[
SysCallParamSig(
'out_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'in_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['off_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendfile64': SysCallSig(
'sendfile64',
params=[
SysCallParamSig(
'out_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'in_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readlink': SysCallSig(
'readlink',
params=[
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'bufsiz',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'creat': SysCallSig(
'creat',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'open': SysCallSig(
'open',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'close': SysCallSig(
'close',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'access': SysCallSig(
'access',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'vhangup': SysCallSig(
'vhangup',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chown': SysCallSig(
'chown',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lchown': SysCallSig(
'lchown',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchown': SysCallSig(
'fchown',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'utime': SysCallSig(
'utime',
params=[
SysCallParamSig(
'filename',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'times',
CType(
['struct', 'utimbuf', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'utimes': SysCallSig(
'utimes',
params=[
SysCallParamSig(
'filename',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'utimes',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
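    # -------- seeking and plain/vectored/positioned read-write:
    # lseek/llseek, read/readv/readahead, write/writev, pread*/pwrite*.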
'lseek': SysCallSig(
'lseek',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'offset',
CType(
['off_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'whence',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'llseek': SysCallSig(
'llseek',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'offset_high',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'offset_low',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'result',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'whence',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'read': SysCallSig(
'read',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readahead': SysCallSig(
'readahead',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readv': SysCallSig(
'readv',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'write': SysCallSig(
'write',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'writev': SysCallSig(
'writev',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pread64': SysCallSig(
'pread64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'pos',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pwrite64': SysCallSig(
'pwrite64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buf',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'pos',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'preadv': SysCallSig(
'preadv',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'preadv2': SysCallSig(
'preadv2',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pwritev': SysCallSig(
'pwritev',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pwritev2': SysCallSig(
'pwritev2',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'vec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_l',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pos_h',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
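    # -------- working directory and directory handling (getcwd, mkdir, chdir,
    # fchdir, rmdir, getdents/getdents64) plus lookup_dcookie and quotactl.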
'getcwd': SysCallSig(
'getcwd',
params=[
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mkdir': SysCallSig(
'mkdir',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'chdir': SysCallSig(
'chdir',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchdir': SysCallSig(
'fchdir',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'rmdir': SysCallSig(
'rmdir',
params=[
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'lookup_dcookie': SysCallSig(
'lookup_dcookie',
params=[
SysCallParamSig(
'cookie64',
CType(
['u64'],
ctypes.c_ulonglong,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'quotactl': SysCallSig(
'quotactl',
params=[
SysCallParamSig(
'cmd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'special',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'id',
CType(
['qid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'addr',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getdents': SysCallSig(
'getdents',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'dirent',
CType(
['struct', 'linux_dirent', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getdents64': SysCallSig(
'getdents64',
params=[
SysCallParamSig(
'fd',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'dirent',
CType(
['struct', 'linux_dirent64', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
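    # -------- BSD socket API (setsockopt .. listen, including the legacy
    # multiplexed socketcall). Most socket prototypes in the kernel headers
    # leave their parameters unnamed, hence the name=None entries below.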
'setsockopt': SysCallSig(
'setsockopt',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'level',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optname',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optval',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'optlen',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getsockopt': SysCallSig(
'getsockopt',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'level',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optname',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'optval',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'optlen',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'bind': SysCallSig(
'bind',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'connect': SysCallSig(
'connect',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'accept': SysCallSig(
'accept',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'accept4': SysCallSig(
'accept4',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getsockname': SysCallSig(
'getsockname',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getpeername': SysCallSig(
'getpeername',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'send': SysCallSig(
'send',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendto': SysCallSig(
'sendto',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendmsg': SysCallSig(
'sendmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'user_msghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sendmmsg': SysCallSig(
'sendmmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'mmsghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recv': SysCallSig(
'recv',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recvfrom': SysCallSig(
'recvfrom',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'sockaddr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recvmsg': SysCallSig(
'recvmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'user_msghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'recvmmsg': SysCallSig(
'recvmmsg',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg',
CType(
['struct', 'mmsghdr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'vlen',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'timeout',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'socket': SysCallSig(
'socket',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'socketpair': SysCallSig(
'socketpair',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'socketcall': SysCallSig(
'socketcall',
params=[
SysCallParamSig(
'call',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'args',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'listen': SysCallSig(
'listen',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
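    # -------- I/O multiplexing: poll, select (and the old_select shim), and
    # the epoll family.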
'poll': SysCallSig(
'poll',
params=[
SysCallParamSig(
'ufds',
CType(
['struct', 'pollfd', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nfds',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'timeout',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'select': SysCallSig(
'select',
params=[
SysCallParamSig(
'n',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'inp',
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'outp',
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'exp',
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'tvp',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'old_select': SysCallSig(
'old_select',
params=[
SysCallParamSig(
'arg',
CType(
['struct', 'sel_arg_struct', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_create': SysCallSig(
'epoll_create',
params=[
SysCallParamSig(
'size',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_create1': SysCallSig(
'epoll_create1',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_ctl': SysCallSig(
'epoll_ctl',
params=[
SysCallParamSig(
'epfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'op',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'event',
CType(
['struct', 'epoll_event', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_wait': SysCallSig(
'epoll_wait',
params=[
SysCallParamSig(
'epfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'events',
CType(
['struct', 'epoll_event', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'maxevents',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'timeout',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'epoll_pwait': SysCallSig(
'epoll_pwait',
params=[
SysCallParamSig(
'epfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'events',
CType(
['struct', 'epoll_event', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'maxevents',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'timeout',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sigmask',
CType(
['const', 'sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sigsetsize',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
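    # -------- host/domain identification: gethostname, sethostname,
    # setdomainname, and the three uname generations.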
'gethostname': SysCallSig(
'gethostname',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sethostname': SysCallSig(
'sethostname',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setdomainname': SysCallSig(
'setdomainname',
params=[
SysCallParamSig(
'name',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newuname': SysCallSig(
'newuname',
params=[
SysCallParamSig(
'name',
CType(
['struct', 'new_utsname', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'uname': SysCallSig(
'uname',
params=[
SysCallParamSig(
None,
CType(
['struct', 'old_utsname', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'olduname': SysCallSig(
'olduname',
params=[
SysCallParamSig(
None,
CType(
['struct', 'oldold_utsname', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
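    # -------- resource limits and accounting (getrlimit/setrlimit/prlimit64,
    # getrusage, umask).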
'getrlimit': SysCallSig(
'getrlimit',
params=[
SysCallParamSig(
'resource',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'rlim',
CType(
['struct', 'rlimit', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setrlimit': SysCallSig(
'setrlimit',
params=[
SysCallParamSig(
'resource',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'rlim',
CType(
['struct', 'rlimit', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'prlimit64': SysCallSig(
'prlimit64',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'resource',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'new_rlim',
CType(
['const', 'struct', 'rlimit64', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'old_rlim',
CType(
['struct', 'rlimit64', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getrusage': SysCallSig(
'getrusage',
params=[
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ru',
CType(
['struct', 'rusage', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'umask': SysCallSig(
'umask',
params=[
SysCallParamSig(
'mask',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
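    # -------- System V IPC: message queues (msg*), semaphores (sem*), shared
    # memory (shm*), and the multiplexed ipc entry point.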
'msgget': SysCallSig(
'msgget',
params=[
SysCallParamSig(
'key',
CType(
['key_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msgflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msgsnd': SysCallSig(
'msgsnd',
params=[
SysCallParamSig(
'msqid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msgp',
CType(
['struct', 'msgbuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'msgsz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msgflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msgrcv': SysCallSig(
'msgrcv',
params=[
SysCallParamSig(
'msqid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msgp',
CType(
['struct', 'msgbuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'msgsz',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msgtyp',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'msgflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'msgctl': SysCallSig(
'msgctl',
params=[
SysCallParamSig(
'msqid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'msqid_ds', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semget': SysCallSig(
'semget',
params=[
SysCallParamSig(
'key',
CType(
['key_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nsems',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'semflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semop': SysCallSig(
'semop',
params=[
SysCallParamSig(
'semid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sops',
CType(
['struct', 'sembuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nsops',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semctl': SysCallSig(
'semctl',
params=[
SysCallParamSig(
'semid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'semnum',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'semtimedop': SysCallSig(
'semtimedop',
params=[
SysCallParamSig(
'semid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'sops',
CType(
['struct', 'sembuf', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nsops',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'timeout',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmat': SysCallSig(
'shmat',
params=[
SysCallParamSig(
'shmid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'shmaddr',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'shmflg',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmget': SysCallSig(
'shmget',
params=[
SysCallParamSig(
'key',
CType(
['key_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'size',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmdt': SysCallSig(
'shmdt',
params=[
SysCallParamSig(
'shmaddr',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'shmctl': SysCallSig(
'shmctl',
params=[
SysCallParamSig(
'shmid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'buf',
CType(
['struct', 'shmid_ds', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ipc': SysCallSig(
'ipc',
params=[
SysCallParamSig(
'call',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'first',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'second',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'third',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'ptr',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'fifth',
CType(
['long'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
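    # -------- POSIX message queues (mq_*).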
'mq_open': SysCallSig(
'mq_open',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'oflag',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'attr',
CType(
['struct', 'mq_attr', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_unlink': SysCallSig(
'mq_unlink',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_timedsend': SysCallSig(
'mq_timedsend',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg_ptr',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'msg_len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msg_prio',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'abs_timeout',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_timedreceive': SysCallSig(
'mq_timedreceive',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'msg_ptr',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'msg_len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'msg_prio',
CType(
['unsigned', 'int', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'abs_timeout',
CType(
['const', 'struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_notify': SysCallSig(
'mq_notify',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'notification',
CType(
['const', 'struct', 'sigevent', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mq_getsetattr': SysCallSig(
'mq_getsetattr',
params=[
SysCallParamSig(
'mqdes',
CType(
['mqd_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mqstat',
CType(
['const', 'struct', 'mq_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'omqstat',
CType(
['struct', 'mq_attr', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
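    # -------- PCI configuration-space access (pciconfig_*).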
'pciconfig_iobase': SysCallSig(
'pciconfig_iobase',
params=[
SysCallParamSig(
'which',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'bus',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'devfn',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pciconfig_read': SysCallSig(
'pciconfig_read',
params=[
SysCallParamSig(
'bus',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'dfn',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'off',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'buf',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pciconfig_write': SysCallSig(
'pciconfig_write',
params=[
SysCallParamSig(
'bus',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'dfn',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'off',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'buf',
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
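    # -------- process control and system administration: prctl,
    # swapon/swapoff, sysctl/sysinfo/sysfs/syslog, uselib, the ni_syscall
    # placeholder, and ptrace.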
'prctl': SysCallSig(
'prctl',
params=[
SysCallParamSig(
'option',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg3',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg4',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg5',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'swapon': SysCallSig(
'swapon',
params=[
SysCallParamSig(
'specialfile',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'swap_flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'swapoff': SysCallSig(
'swapoff',
params=[
SysCallParamSig(
'specialfile',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sysctl': SysCallSig(
'sysctl',
params=[
SysCallParamSig(
'args',
CType(
['struct', '__sysctl_args', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sysinfo': SysCallSig(
'sysinfo',
params=[
SysCallParamSig(
'info',
CType(
['struct', 'sysinfo', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sysfs': SysCallSig(
'sysfs',
params=[
SysCallParamSig(
'option',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg1',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'syslog': SysCallSig(
'syslog',
params=[
SysCallParamSig(
'type',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'len',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'uselib': SysCallSig(
'uselib',
params=[
SysCallParamSig(
'library',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ni_syscall': SysCallSig(
'ni_syscall',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ptrace': SysCallSig(
'ptrace',
params=[
SysCallParamSig(
'request',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'pid',
CType(
['long'],
ctypes.c_long,
0
)
),
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'data',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
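    # -------- kernel keyring services (add_key, request_key, keyctl).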
'add_key': SysCallSig(
'add_key',
params=[
SysCallParamSig(
'_type',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_description',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_payload',
CType(
['const', 'void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
'plen',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'destringid',
CType(
['key_serial_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'request_key': SysCallSig(
'request_key',
params=[
SysCallParamSig(
'_type',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_description',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'_callout_info',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'destringid',
CType(
['key_serial_t'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'keyctl': SysCallSig(
'keyctl',
params=[
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'arg2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg3',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg4',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'arg5',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
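    # -------- I/O scheduling priority (ioprio_set, ioprio_get).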
'ioprio_set': SysCallSig(
'ioprio_set',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'ioprio',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioprio_get': SysCallSig(
'ioprio_get',
params=[
SysCallParamSig(
'which',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'who',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
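    # -------- NUMA memory policy and page migration (set_mempolicy,
    # migrate_pages, move_pages, mbind, get_mempolicy).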
'set_mempolicy': SysCallSig(
'set_mempolicy',
params=[
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nmask',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'migrate_pages': SysCallSig(
'migrate_pages',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'from',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'to',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'move_pages': SysCallSig(
'move_pages',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nr_pages',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pages',
CType(
['const', 'void', '*', '*'],
ctypes.c_long,
2
)
),
SysCallParamSig(
'nodes',
CType(
['const', 'int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'status',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mbind': SysCallSig(
'mbind',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'mode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'nmask',
CType(
['const', 'unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'get_mempolicy': SysCallSig(
'get_mempolicy',
params=[
SysCallParamSig(
'policy',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'nmask',
CType(
['unsigned', 'long', '*'],
ctypes.c_ulong,
1
)
),
SysCallParamSig(
'maxnode',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
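    # -------- inotify filesystem-event notification.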
'inotify_init': SysCallSig(
'inotify_init',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'inotify_init1': SysCallSig(
'inotify_init1',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'inotify_add_watch': SysCallSig(
'inotify_add_watch',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mask',
CType(
['u32'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'inotify_rm_watch': SysCallSig(
'inotify_rm_watch',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'wd',
CType(
['__s32'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
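    # -------- Cell/B.E. SPU management (spu_run, spu_create).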
'spu_run': SysCallSig(
'spu_run',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'unpc',
CType(
['__u32', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'ustatus',
CType(
['__u32', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'spu_create': SysCallSig(
'spu_create',
params=[
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
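    # -------- directory-fd relative *at() variants (mknodat .. utimensat);
    # the dfd argument accepts AT_FDCWD for cwd-relative paths.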
'mknodat': SysCallSig(
'mknodat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
SysCallParamSig(
'dev',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mkdirat': SysCallSig(
'mkdirat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'unlinkat': SysCallSig(
'unlinkat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'symlinkat': SysCallSig(
'symlinkat',
params=[
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'linkat': SysCallSig(
'linkat',
params=[
SysCallParamSig(
'olddfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'renameat': SysCallSig(
'renameat',
params=[
SysCallParamSig(
'olddfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'renameat2': SysCallSig(
'renameat2',
params=[
SysCallParamSig(
'olddfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'oldname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'newdfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'newname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'futimesat': SysCallSig(
'futimesat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'utimes',
CType(
['struct', 'timeval', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'faccessat': SysCallSig(
'faccessat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchmodat': SysCallSig(
'fchmodat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fchownat': SysCallSig(
'fchownat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'user',
CType(
['uid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'group',
CType(
['gid_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'openat': SysCallSig(
'openat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['umode_t'],
ctypes.c_ushort,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'newfstatat': SysCallSig(
'newfstatat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'statbuf',
CType(
['struct', 'stat', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'readlinkat': SysCallSig(
'readlinkat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'bufsiz',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'utimensat': SysCallSig(
'utimensat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'utimes',
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
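    # -------- namespace unsharing and pipe splicing (unshare, splice,
    # vmsplice, tee) plus the sync_file_range variants.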
'unshare': SysCallSig(
'unshare',
params=[
SysCallParamSig(
'unshare_flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'splice': SysCallSig(
'splice',
params=[
SysCallParamSig(
'fd_in',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_in',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'fd_out',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_out',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'vmsplice': SysCallSig(
'vmsplice',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'iov',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'nr_segs',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'tee': SysCallSig(
'tee',
params=[
SysCallParamSig(
'fdin',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'fdout',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sync_file_range': SysCallSig(
'sync_file_range',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'nbytes',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'sync_file_range2': SysCallSig(
'sync_file_range2',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'nbytes',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
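    # -------- robust futex list registration (get/set_robust_list) and getcpu.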
'get_robust_list': SysCallSig(
'get_robust_list',
params=[
SysCallParamSig(
'pid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'head_ptr',
CType(
['struct', 'robust_list_head', '*', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'len_ptr',
CType(
['size_t', '*'],
ctypes.c_uint,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'set_robust_list': SysCallSig(
'set_robust_list',
params=[
SysCallParamSig(
'head',
CType(
['struct', 'robust_list_head', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getcpu': SysCallSig(
'getcpu',
params=[
SysCallParamSig(
'cpu',
CType(
['unsigned', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'node',
CType(
['unsigned', '*'],
ctypes.c_uint,
1
)
),
SysCallParamSig(
'cache',
CType(
['struct', 'getcpu_cache', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
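    # -------- fd-based event sources: signalfd*, timerfd_*, eventfd*,
    # memfd_create, userfaultfd; plus fallocate.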
'signalfd': SysCallSig(
'signalfd',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'user_mask',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sizemask',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'signalfd4': SysCallSig(
'signalfd4',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'user_mask',
CType(
['sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'sizemask',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timerfd_create': SysCallSig(
'timerfd_create',
params=[
SysCallParamSig(
'clockid',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timerfd_settime': SysCallSig(
'timerfd_settime',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'utmr',
CType(
['const', 'struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'otmr',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'timerfd_gettime': SysCallSig(
'timerfd_gettime',
params=[
SysCallParamSig(
'ufd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'otmr',
CType(
['struct', 'itimerspec', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'eventfd': SysCallSig(
'eventfd',
params=[
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'eventfd2': SysCallSig(
'eventfd2',
params=[
SysCallParamSig(
'count',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'memfd_create': SysCallSig(
'memfd_create',
params=[
SysCallParamSig(
'uname_ptr',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'userfaultfd': SysCallSig(
'userfaultfd',
params=[
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fallocate': SysCallSig(
'fallocate',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'mode',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'offset',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
SysCallParamSig(
'len',
CType(
['loff_t'],
ctypes.c_longlong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'old_readdir': SysCallSig(
'old_readdir',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'old_linux_dirent', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pselect6': SysCallSig(
'pselect6',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['fd_set', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ppoll': SysCallSig(
'ppoll',
params=[
SysCallParamSig(
None,
CType(
['struct', 'pollfd', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
None,
CType(
['struct', 'timespec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['const', 'sigset_t', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
None,
CType(
['size_t'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fanotify_init': SysCallSig(
'fanotify_init',
params=[
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'event_f_flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fanotify_mark': SysCallSig(
'fanotify_mark',
params=[
SysCallParamSig(
'fanotify_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mask',
CType(
['u64'],
ctypes.c_ulonglong,
0
)
),
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pathname',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'syncfs': SysCallSig(
'syncfs',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'fork': SysCallSig(
'fork',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'vfork': SysCallSig(
'vfork',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'clone': SysCallSig(
'clone',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
None,
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'execve': SysCallSig(
'execve',
params=[
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'argv',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
SysCallParamSig(
'envp',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'perf_event_open': SysCallSig(
'perf_event_open',
params=[
SysCallParamSig(
'attr_uptr',
CType(
['struct', 'perf_event_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'cpu',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'group_fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mmap_pgoff': SysCallSig(
'mmap_pgoff',
params=[
SysCallParamSig(
'addr',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'fd',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pgoff',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'old_mmap': SysCallSig(
'old_mmap',
params=[
SysCallParamSig(
'arg',
CType(
['struct', 'mmap_arg_struct', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'name_to_handle_at': SysCallSig(
'name_to_handle_at',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'name',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'handle',
CType(
['struct', 'file_handle', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'mnt_id',
CType(
['int', '*'],
ctypes.c_int,
1
)
),
SysCallParamSig(
'flag',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'open_by_handle_at': SysCallSig(
'open_by_handle_at',
params=[
SysCallParamSig(
'mountdirfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'handle',
CType(
['struct', 'file_handle', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'setns': SysCallSig(
'setns',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'nstype',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'process_vm_readv': SysCallSig(
'process_vm_readv',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'lvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'liovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'rvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'riovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'process_vm_writev': SysCallSig(
'process_vm_writev',
params=[
SysCallParamSig(
'pid',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'lvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'liovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'rvec',
CType(
['const', 'struct', 'iovec', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'riovcnt',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'kcmp': SysCallSig(
'kcmp',
params=[
SysCallParamSig(
'pid1',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'pid2',
CType(
['pid_t'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'type',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'idx1',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'idx2',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'finit_module': SysCallSig(
'finit_module',
params=[
SysCallParamSig(
'fd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'uargs',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'seccomp': SysCallSig(
'seccomp',
params=[
SysCallParamSig(
'op',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'uargs',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'getrandom': SysCallSig(
'getrandom',
params=[
SysCallParamSig(
'buf',
CType(
['char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'count',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'bpf': SysCallSig(
'bpf',
params=[
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'attr',
CType(
['union', 'bpf_attr', '*'],
ctypes.c_void_p,
0
)
),
SysCallParamSig(
'size',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'execveat': SysCallSig(
'execveat',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'filename',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'argv',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
SysCallParamSig(
'envp',
CType(
['const', 'const', 'char', '*', '*'],
ctypes.c_char,
2
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'membarrier': SysCallSig(
'membarrier',
params=[
SysCallParamSig(
'cmd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'copy_file_range': SysCallSig(
'copy_file_range',
params=[
SysCallParamSig(
'fd_in',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_in',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'fd_out',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'off_out',
CType(
['loff_t', '*'],
ctypes.c_longlong,
1
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mlock2': SysCallSig(
'mlock2',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'flags',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pkey_mprotect': SysCallSig(
'pkey_mprotect',
params=[
SysCallParamSig(
'start',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'len',
CType(
['size_t'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'prot',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'pkey',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pkey_alloc': SysCallSig(
'pkey_alloc',
params=[
SysCallParamSig(
'flags',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
'init_val',
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'pkey_free': SysCallSig(
'pkey_free',
params=[
SysCallParamSig(
'pkey',
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'statx': SysCallSig(
'statx',
params=[
SysCallParamSig(
'dfd',
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
'path',
CType(
['const', 'char', '*'],
ctypes.c_char,
1
)
),
SysCallParamSig(
'flags',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'mask',
CType(
['unsigned'],
ctypes.c_uint,
0
)
),
SysCallParamSig(
'buffer',
CType(
['struct', 'statx', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'ioperm': SysCallSig(
'ioperm',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'iopl': SysCallSig(
'iopl',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'int'],
ctypes.c_uint,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'modify_ldt': SysCallSig(
'modify_ldt',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['void', '*'],
ctypes.c_long,
1
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['int'], ctypes.c_int, 0)
),
'rt_sigreturn': SysCallSig(
'rt_sigreturn',
params=[
SysCallParamSig(
None,
CType(
['void'],
ctypes.c_long,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'set_thread_area': SysCallSig(
'set_thread_area',
params=[
SysCallParamSig(
None,
CType(
['struct', 'user_desc', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'get_thread_area': SysCallSig(
'get_thread_area',
params=[
SysCallParamSig(
None,
CType(
['struct', 'user_desc', '*'],
ctypes.c_void_p,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'arch_prctl': SysCallSig(
'arch_prctl',
params=[
SysCallParamSig(
None,
CType(
['int'],
ctypes.c_int,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
'mmap': SysCallSig(
'mmap',
params=[
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
SysCallParamSig(
None,
CType(
['unsigned', 'long'],
ctypes.c_ulong,
0
)
),
],
result=CType(['long'], ctypes.c_long, 0)
),
}
SYSCALL_NUMBERS = {
0: 'read',
1: 'write',
2: 'open',
3: 'close',
4: 'stat',
5: 'fstat',
6: 'lstat',
7: 'poll',
8: 'lseek',
9: 'mmap',
10: 'mprotect',
11: 'munmap',
12: 'brk',
13: 'rt_sigaction',
14: 'rt_sigprocmask',
15: 'rt_sigreturn',
16: 'ioctl',
17: 'pread64',
18: 'pwrite64',
19: 'readv',
20: 'writev',
21: 'access',
22: 'pipe',
23: 'select',
24: 'sched_yield',
25: 'mremap',
26: 'msync',
27: 'mincore',
28: 'madvise',
29: 'shmget',
30: 'shmat',
31: 'shmctl',
32: 'dup',
33: 'dup2',
34: 'pause',
35: 'nanosleep',
36: 'getitimer',
37: 'alarm',
38: 'setitimer',
39: 'getpid',
40: 'sendfile',
41: 'socket',
42: 'connect',
43: 'accept',
44: 'sendto',
45: 'recvfrom',
46: 'sendmsg',
47: 'recvmsg',
48: 'shutdown',
49: 'bind',
50: 'listen',
51: 'getsockname',
52: 'getpeername',
53: 'socketpair',
54: 'setsockopt',
55: 'getsockopt',
56: 'clone',
57: 'fork',
58: 'vfork',
59: 'execve',
60: 'exit',
61: 'wait4',
62: 'kill',
63: 'uname',
64: 'semget',
65: 'semop',
66: 'semctl',
67: 'shmdt',
68: 'msgget',
69: 'msgsnd',
70: 'msgrcv',
71: 'msgctl',
72: 'fcntl',
73: 'flock',
74: 'fsync',
75: 'fdatasync',
76: 'truncate',
77: 'ftruncate',
78: 'getdents',
79: 'getcwd',
80: 'chdir',
81: 'fchdir',
82: 'rename',
83: 'mkdir',
84: 'rmdir',
85: 'creat',
86: 'link',
87: 'unlink',
88: 'symlink',
89: 'readlink',
90: 'chmod',
91: 'fchmod',
92: 'chown',
93: 'fchown',
94: 'lchown',
95: 'umask',
96: 'gettimeofday',
97: 'getrlimit',
98: 'getrusage',
99: 'sysinfo',
100: 'times',
101: 'ptrace',
102: 'getuid',
103: 'syslog',
104: 'getgid',
105: 'setuid',
106: 'setgid',
107: 'geteuid',
108: 'getegid',
109: 'setpgid',
110: 'getppid',
111: 'getpgrp',
112: 'setsid',
113: 'setreuid',
114: 'setregid',
115: 'getgroups',
116: 'setgroups',
117: 'setresuid',
118: 'getresuid',
119: 'setresgid',
120: 'getresgid',
121: 'getpgid',
122: 'setfsuid',
123: 'setfsgid',
124: 'getsid',
125: 'capget',
126: 'capset',
127: 'rt_sigpending',
128: 'rt_sigtimedwait',
129: 'rt_sigqueueinfo',
130: 'rt_sigsuspend',
131: 'sigaltstack',
132: 'utime',
133: 'mknod',
134: 'uselib',
135: 'personality',
136: 'ustat',
137: 'statfs',
138: 'fstatfs',
139: 'sysfs',
140: 'getpriority',
141: 'setpriority',
142: 'sched_setparam',
143: 'sched_getparam',
144: 'sched_setscheduler',
145: 'sched_getscheduler',
146: 'sched_get_priority_max',
147: 'sched_get_priority_min',
148: 'sched_rr_get_interval',
149: 'mlock',
150: 'munlock',
151: 'mlockall',
152: 'munlockall',
153: 'vhangup',
154: 'modify_ldt',
155: 'pivot_root',
156: '_sysctl',
157: 'prctl',
158: 'arch_prctl',
159: 'adjtimex',
160: 'setrlimit',
161: 'chroot',
162: 'sync',
163: 'acct',
164: 'settimeofday',
165: 'mount',
166: 'umount2',
167: 'swapon',
168: 'swapoff',
169: 'reboot',
170: 'sethostname',
171: 'setdomainname',
172: 'iopl',
173: 'ioperm',
174: 'create_module',
175: 'init_module',
176: 'delete_module',
177: 'get_kernel_syms',
178: 'query_module',
179: 'quotactl',
180: 'nfsservctl',
181: 'getpmsg',
182: 'putpmsg',
183: 'afs_syscall',
184: 'tuxcall',
185: 'security',
186: 'gettid',
187: 'readahead',
188: 'setxattr',
189: 'lsetxattr',
190: 'fsetxattr',
191: 'getxattr',
192: 'lgetxattr',
193: 'fgetxattr',
194: 'listxattr',
195: 'llistxattr',
196: 'flistxattr',
197: 'removexattr',
198: 'lremovexattr',
199: 'fremovexattr',
200: 'tkill',
201: 'time',
202: 'futex',
203: 'sched_setaffinity',
204: 'sched_getaffinity',
205: 'set_thread_area',
206: 'io_setup',
207: 'io_destroy',
208: 'io_getevents',
209: 'io_submit',
210: 'io_cancel',
211: 'get_thread_area',
212: 'lookup_dcookie',
213: 'epoll_create',
214: 'epoll_ctl_old',
215: 'epoll_wait_old',
216: 'remap_file_pages',
217: 'getdents64',
218: 'set_tid_address',
219: 'restart_syscall',
220: 'semtimedop',
221: 'fadvise64',
222: 'timer_create',
223: 'timer_settime',
224: 'timer_gettime',
225: 'timer_getoverrun',
226: 'timer_delete',
227: 'clock_settime',
228: 'clock_gettime',
229: 'clock_getres',
230: 'clock_nanosleep',
231: 'exit_group',
232: 'epoll_wait',
233: 'epoll_ctl',
234: 'tgkill',
235: 'utimes',
236: 'vserver',
237: 'mbind',
238: 'set_mempolicy',
239: 'get_mempolicy',
240: 'mq_open',
241: 'mq_unlink',
242: 'mq_timedsend',
243: 'mq_timedreceive',
244: 'mq_notify',
245: 'mq_getsetattr',
246: 'kexec_load',
247: 'waitid',
248: 'add_key',
249: 'request_key',
250: 'keyctl',
251: 'ioprio_set',
252: 'ioprio_get',
253: 'inotify_init',
254: 'inotify_add_watch',
255: 'inotify_rm_watch',
256: 'migrate_pages',
257: 'openat',
258: 'mkdirat',
259: 'mknodat',
260: 'fchownat',
261: 'futimesat',
262: 'newfstatat',
263: 'unlinkat',
264: 'renameat',
265: 'linkat',
266: 'symlinkat',
267: 'readlinkat',
268: 'fchmodat',
269: 'faccessat',
270: 'pselect6',
271: 'ppoll',
272: 'unshare',
273: 'set_robust_list',
274: 'get_robust_list',
275: 'splice',
276: 'tee',
277: 'sync_file_range',
278: 'vmsplice',
279: 'move_pages',
280: 'utimensat',
281: 'epoll_pwait',
282: 'signalfd',
283: 'timerfd_create',
284: 'eventfd',
285: 'fallocate',
286: 'timerfd_settime',
287: 'timerfd_gettime',
288: 'accept4',
289: 'signalfd4',
290: 'eventfd2',
291: 'epoll_create1',
292: 'dup3',
293: 'pipe2',
294: 'inotify_init1',
295: 'preadv',
296: 'pwritev',
297: 'rt_tgsigqueueinfo',
298: 'perf_event_open',
299: 'recvmmsg',
300: 'fanotify_init',
301: 'fanotify_mark',
302: 'prlimit64',
303: 'name_to_handle_at',
304: 'open_by_handle_at',
305: 'clock_adjtime',
306: 'syncfs',
307: 'sendmmsg',
308: 'setns',
309: 'getcpu',
310: 'process_vm_readv',
311: 'process_vm_writev',
312: 'kcmp',
313: 'finit_module',
314: 'sched_setattr',
315: 'sched_getattr',
316: 'renameat2',
317: 'seccomp',
318: 'getrandom',
319: 'memfd_create',
320: 'kexec_file_load',
321: 'bpf',
322: 'execveat',
323: 'userfaultfd',
324: 'membarrier',
325: 'mlock2',
326: 'copy_file_range',
327: 'preadv2',
328: 'pwritev2',
329: 'pkey_mprotect',
330: 'pkey_alloc',
331: 'pkey_free',
332: 'statx',
}
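# Minimal lookup sketch: chaining the two tables above to resolve a syscall
# number to its signature. `SYSCALL_SIGNATURES` is an assumed name for the
# name-to-SysCallSig dict whose entries end just before SYSCALL_NUMBERS; the
# generated identifier may differ.
def signature_for_number(nr):
    # SYSCALL_SIGNATURES is the assumed signature table; see note above.
    name = SYSCALL_NUMBERS.get(nr)
    return SYSCALL_SIGNATURES.get(name) if name is not None else None
# e.g. signature_for_number(59) would return the SysCallSig for 'execve'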
|
_fields_ = (
('si_band', ctypes.c_long),
('si_fd', ctypes.c_int),
)
|
test.py
|
# flake8: noqa
# DEV: Skip linting, we lint with Python 2, we'll get SyntaxErrors from `yield from`
# stdlib
import time
import asyncio
# 3p
import aiopg
from psycopg2 import extras
# project
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.aiopg.patch import patch, unpatch
from ddtrace import Pin
# testing
from tests.opentracer.utils import init_tracer
from tests.contrib.config import POSTGRES_CONFIG
from tests.test_tracer import get_dummy_tracer
from tests.contrib.asyncio.utils import AsyncioTestCase, mark_asyncio
TEST_PORT = str(POSTGRES_CONFIG['port'])
class AiopgTestCase(AsyncioTestCase):
# default service
TEST_SERVICE = 'postgres'
def setUp(self):
super().setUp()
self._conn = None
patch()
def tearDown(self):
super().tearDown()
if self._conn and not self._conn.closed:
self._conn.close()
unpatch()
@asyncio.coroutine
def _get_conn_and_tracer(self):
conn = self._conn = yield from aiopg.connect(**POSTGRES_CONFIG)
Pin.get_from(conn).clone(tracer=self.tracer).onto(conn)
return conn, self.tracer
@asyncio.coroutine
def assert_conn_is_traced(self, tracer, db, service):
# ensure the trace aiopg client doesn't add non-standard
# methods
try:
yield from db.execute('select \'foobar\'')
except AttributeError:
pass
writer = tracer.writer
# Ensure we can run a query and it's correctly traced
q = 'select \'foobarblah\''
start = time.time()
cursor = yield from db.cursor()
yield from cursor.execute(q)
rows = yield from cursor.fetchall()
end = time.time()
assert rows == [('foobarblah',)]
assert rows
spans = writer.pop()
assert spans
assert len(spans) == 1
span = spans[0]
assert span.name == 'postgres.query'
assert span.resource == q
assert span.service == service
assert span.meta['sql.query'] == q
assert span.error == 0
assert span.span_type == 'sql'
assert start <= span.start <= end
assert span.duration <= end - start
# Ensure OpenTracing compatibility
ot_tracer = init_tracer('aiopg_svc', tracer)
with ot_tracer.start_active_span('aiopg_op'):
cursor = yield from db.cursor()
yield from cursor.execute(q)
rows = yield from cursor.fetchall()
assert rows == [('foobarblah',)]
spans = writer.pop()
assert len(spans) == 2
ot_span, dd_span = spans
# confirm the parenting
assert ot_span.parent_id == None
assert dd_span.parent_id == ot_span.span_id
assert ot_span.name == 'aiopg_op'
assert ot_span.service == 'aiopg_svc'
assert dd_span.name == 'postgres.query'
assert dd_span.resource == q
assert dd_span.service == service
assert dd_span.meta['sql.query'] == q
assert dd_span.error == 0
assert dd_span.span_type == 'sql'
# run a query with an error and ensure all is well
q = 'select * from some_non_existant_table'
cur = yield from db.cursor()
try:
yield from cur.execute(q)
except Exception:
pass
else:
assert 0, 'should have an error'
spans = writer.pop()
assert spans, spans
assert len(spans) == 1
span = spans[0]
assert span.name == 'postgres.query'
assert span.resource == q
assert span.service == service
assert span.meta['sql.query'] == q
assert span.error == 1
# assert span.meta['out.host'] == 'localhost'
assert span.meta['out.port'] == TEST_PORT
assert span.span_type == 'sql'
@mark_asyncio
def test_disabled_execute(self):
conn, tracer = yield from self._get_conn_and_tracer()
tracer.enabled = False
# these calls were crashing with a previous version of the code.
yield from (yield from conn.cursor()).execute(query='select \'blah\'')
yield from (yield from conn.cursor()).execute('select \'blah\'')
assert not tracer.writer.pop()
@mark_asyncio
def test_manual_wrap_extension_types(self):
conn, _ = yield from self._get_conn_and_tracer()
# NOTE: this will crash if it doesn't work.
# _ext.register_type(_ext.UUID, conn_or_curs)
# TypeError: argument 2 must be a connection, cursor or None
extras.register_uuid(conn_or_curs=conn)
@mark_asyncio
def
|
(self):
tracer = get_dummy_tracer()
services = ['db', 'another']
for service in services:
conn, _ = yield from self._get_conn_and_tracer()
Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
yield from self.assert_conn_is_traced(tracer, conn, service)
conn.close()
# ensure we have the service types
service_meta = tracer.writer.pop_services()
expected = {}
assert service_meta == expected
@mark_asyncio
def test_patch_unpatch(self):
tracer = get_dummy_tracer()
writer = tracer.writer
# Test patch idempotence
patch()
patch()
service = 'fo'
conn = yield from aiopg.connect(**POSTGRES_CONFIG)
Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
yield from (yield from conn.cursor()).execute('select \'blah\'')
conn.close()
spans = writer.pop()
assert spans, spans
assert len(spans) == 1
# Test unpatch
unpatch()
conn = yield from aiopg.connect(**POSTGRES_CONFIG)
yield from (yield from conn.cursor()).execute('select \'blah\'')
conn.close()
spans = writer.pop()
assert not spans, spans
# Test patch again
patch()
conn = yield from aiopg.connect(**POSTGRES_CONFIG)
Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
yield from (yield from conn.cursor()).execute('select \'blah\'')
conn.close()
spans = writer.pop()
assert spans, spans
assert len(spans) == 1
class AiopgAnalyticsTestCase(AiopgTestCase):
@asyncio.coroutine
def trace_spans(self):
service = 'db'
conn, _ = yield from self._get_conn_and_tracer()
Pin.get_from(conn).clone(service='db', tracer=self.tracer).onto(conn)
cursor = yield from conn.cursor()
yield from cursor.execute('select \'foobar\'')
rows = yield from cursor.fetchall()
assert rows
return self.get_spans()
@mark_asyncio
def test_analytics_default(self):
spans = yield from self.trace_spans()
self.assertEqual(len(spans), 1)
self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY))
@mark_asyncio
def test_analytics_with_rate(self):
with self.override_config(
'aiopg',
dict(analytics_enabled=True, analytics_sample_rate=0.5)
):
spans = yield from self.trace_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
@mark_asyncio
def test_analytics_without_rate(self):
with self.override_config(
'aiopg',
dict(analytics_enabled=True)
):
spans = yield from self.trace_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
|
test_connect_factory
|
slidesReducer.ts
|
import {SlideAPIInfo, SlideReducerAction, SlideReducerState, SlideSection} from "../types/slideTypes";
export const slideReducerInitialState: SlideReducerState = {
templateSectionTitles: [
{ title: "Introduction", subtitles: ["Background", "Problem", "Workflow"]},
{ title: "EDA", subtitles: ["Data Source", "Preview the raw data values", "Data cleaning", "Exploratory data analysis", "Exploratory data analysis: Example"]},
{ title: "Feature Engineering", subtitles: ["Feature Engineering Summarization", "Feature Engineering Example"]},
{ title: "Model Details", subtitles: ['Model Input', 'Model Output', 'Optimization', 'Model Alternatives', "Model Details"]},
{ title: "Model Performance", subtitles: ['Metrics', 'Performance']},
{ title: "Conclusion", subtitles: ['Suggestions', 'Ethical & Legal consideration', "Limitation & Risks"]}
],
sectionSubtitles: {},
sectionTitles: [],
sectionPoints: {},
sectionCodeCells: {},
sectionImages: {},
exampleSubsections: []
}
function
|
(state: SlideReducerState, action: SlideReducerAction): SlideReducerState {
switch (action.type) {
case "updateSlides":
updateSlides(state, action.payload)
return state
}
}
function updateSlides(state: SlideReducerState, payload: SlideAPIInfo) {
console.log("Update slides")
state.sectionTitles = []
state.templateSectionTitles = payload.template
state.templateSectionTitles.forEach((section: SlideSection) => {
        if (section.title in payload.slidesContent) {
            state.sectionTitles.push(section.title)
            const points = payload.slidesContent[section.title].points
            const cells = payload.slidesContent[section.title].cells
            // @ts-ignore
            state.sectionPoints[section.title] = points
            // @ts-ignore
            state.sectionCodeCells[section.title] = cells
        }
})
console.log("state", state)
}
export default slideReducer;
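// Usage sketch (assumed wiring, not defined in this file): the reducer is
// meant for React's useReducer; `apiResponse` stands in for a SlideAPIInfo
// payload received from the backend.
//   const [state, dispatch] = useReducer(slideReducer, slideReducerInitialState);
//   dispatch({ type: "updateSlides", payload: apiResponse });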
|
slideReducer
|
link.ts
|
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* OpenAPI spec version: 1.1.1
* Contact: [email protected]
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
export class Link {
'_class'?: string;
|
static attributeTypeMap: Array<{name: string, baseName: string, type: string}> = [
{
"name": "_class",
"baseName": "_class",
"type": "string"
},
{
"name": "href",
"baseName": "href",
"type": "string"
} ];
static getAttributeTypeMap() {
return Link.attributeTypeMap;
}
}
|
'href'?: string;
static discriminator: string | undefined = undefined;
|
bin.py
|
import os, json, sys, re, random, time
import requests, inquirer
from jinja2 import Template
from termcolor import colored
root = os.path.dirname(os.path.realpath(__file__))
JSON_FILE = root + '/json.json'
ARR_FILE = root + '/arr.json'
MAP = {
    '名词': 'n.',
    '动词': 'vt.',
    '形容词': 'adj.',
    '副词': 'adv.',
    '介词': 'prep.',
    '代词': 'pron.'
}
def isChinese(text):
if u'\u
|
text <= u'\u9fff':
return True
return False
def translate(text):
params = {
'client': 'gtx',
'sl': 'en',
'tl': 'zh-CN',
'q': text,
'ie': 'UTF-8'
# 'format': 'text',
# 'model': 'base',
# 'target': 'zh-CN',
# 'key': ''
}
    if isChinese(text):
params['sl'] = 'zh-CN'
params['tl'] = 'en'
    # t  - translation of the source text
    # at - additionally returns some alternate translations
    # ex - examples
    # ss - if a single word is translated, also returns related verbs,
    #      adjectives and nouns for that word
    # md - if a single word is translated, returns its definitions
    # rw - related word combinations
    # bd - word senses
    # rm - romanized pronunciation
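    # The response is a nested JSON array; the indices unpacked below are:
    #   content[0]  - sentence data (translation, source text, phonetic)
    #   content[1]  - dictionary entries grouped by part of speech
    #   content[13] - example sentences; content[14] - related phrases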
req = requests.get('http://translate.google.cn/translate_a/single?dt=t&dt=rm&dt=bd&dt=rw&dt=ex', params=params)
# req = requests.post('https://translation.googleapis.com/language/translate/v2', params=params)
content = json.loads(req.content)
leng = len(content)
# print(content)
target = ''
source = ''
phonetic = ''
word = []
phrase = []
example = []
try:
if 1 < leng and content[1]:
for w in content[1]:
ww = {
'type': w[0],
                    'text': '，'.join(w[1])
}
if w[0] in MAP:
ww['type'] = MAP[w[0]]
word.append(ww)
if 14 < leng and content[14] and content[14][0]:
phrase = [ph for ph in content[14][0]]
if 13 < leng and content[13] and content[13][0]:
example = [ex[0] for ex in content[13][0]]
target = content[0][0][0]
source = content[0][0][1]
phonetic = content[0][1][3]
except IndexError as e:
print(e)
pass
result = {
'target': target,
'source': source,
'phonetic': phonetic,
'word': word,
'phrase': phrase,
'example': example
}
return result
def cmdText(content):
print(
colored(content['source'], 'red') + colored(' [' + content['phonetic'] + '] ' + content['target'], 'green') + '\n')
for w in content['word']:
print(colored(w['type'], 'cyan') + ' ' + colored(w['text'], 'red'))
    print('\n' + colored('词组：', 'cyan') + colored('[' + '] ['.join(content['phrase']) + ']', 'yellow') + '\n')
leng = len(content['example'])
if leng > 5:
leng = 5
for i in range(leng):
ex = content['example'][i]
w = re.sub(r'^(.*)<b>(.+)</b>(.*)$', lambda a: a[1] + colored(a[2], 'red') + a[3], ex)
        print(colored('ex：', 'cyan') + ' ' + w)
def saveWord(content):
leng = len(content['word'])
    if isChinese(content['source']) or leng == 0:
return
word = content['source']
saveFile([word])
def saveFile(words):
with open(JSON_FILE, 'r') as f:
j = json.load(f)
with open(ARR_FILE, 'r') as f:
a = json.load(f)
for word in words:
        if word in j:
            continue
else:
j[word] = True
a.append(word)
with open(JSON_FILE, 'w') as f:
json.dump(j, f)
with open(ARR_FILE, 'w') as f:
json.dump(a, f)
def getWord(num=50):
with open(ARR_FILE, 'r') as f:
a = json.load(f)
if len(a) < num:
num = len(a)
words = []
tmpJson = {}
def getRandomWord():
index = random.randint(0, num - 1)
word = a[index]
if word in tmpJson:
return getRandomWord()
words.append(word)
tmpJson[word] = True
return word
for index in range(num):
getRandomWord()
return words
def getWordData(arr):
contents = [translate(word) for word in arr]
return contents
def makeWordList(argv=sys.argv):
num = 50
if len(argv) >= 2:
num = int(argv[1])
str = ''
with open(root + '/_index.html', 'r') as f:
str = f.read()
firstTime = time.time()
data = getWordData(getWord(num))
temp = Template(str)
htmlStr = temp.render({"data": data})
fileName = 'words_' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + '.html'
with open(os.getcwd() + '/' + fileName, 'w') as f:
f.write(htmlStr)
lastTime = time.time()
print(lastTime - firstTime)
print('make html: ' + fileName)
sys.exit(0)
def copyWords(argv=sys.argv):
fileName = 'copy.json'
if len(argv) >= 2:
fileName = argv[1]
with open(ARR_FILE, 'r') as f:
a = json.load(f)
with open(os.getcwd() + '/' + fileName, 'w') as f:
json.dump(a, f)
print('make json: ' + fileName)
sys.exit(0)
def joinWords(argv=sys.argv):
fileName = 'join.json'
if len(argv) >= 2:
fileName = argv[1]
with open(os.getcwd() + '/' + fileName, 'r') as f:
a = json.load(f)
    if not isinstance(a, list):
        print(fileName + ' is invalid')
return
saveFile(a)
print('copy json: ' + fileName)
sys.exit(0)
def removeWord():
message = ''
questions = [
inquirer.Text('ask', message=message)
]
answers = inquirer.prompt(questions)
try:
word = answers['ask']
except Exception:
sys.exit(1)
print(word)
sys.exit(0)
def execute(argv=sys.argv):
    '''
    Entry function.
    :param argv: argument list (defaults to sys.argv)
    :return: None
    '''
content = translate(' '.join(argv[1:]))
cmdText(content)
saveWord(content)
sys.exit(0)
|
4e00' <=
|
admin.py
|
import traceback
from utils import *
async def assign_default_role(client, member, role_name):
roles = list(filter(lambda k: k.name == role_name, member.server.roles))
if len(roles) == 0:
return
await client.add_roles(member, roles[0])
async def notify_of_leaving_person(client, member):
bot_channel = get_server(member.server).bot_channel
await client.send_message(bot_channel, '**{}** just left {}. Bye, bye!'.format(member.name, member.server))
async def notify_of_joining_person(client, member):
bot_channel = get_server(member.server).bot_channel
await client.send_message(bot_channel, '**{}** just joined {}. Welcome!'.format(member.name, member.server))
# Used for broadcasting Ohminator announcements
@register_command("broadcast")
async def broadcast(message, bot_channel, client):
await client.delete_message(message)
if message.author.id != "159315181288030208":
await client.send_message(bot_channel,
"{}: Sorry, this command is only for the author of Ohminator!".format(
message.author.name))
return
split_message = message.content.split()
if len(split_message) > 2:
# If all is written instead of channel id, all bot-spam channels will be messaged
if split_message[1] == "all":
for channel in map(lambda s: s.bot_channel, server_list):
await client.send_message(channel, "**Announcement**: {}".format(" ".join(split_message[2:])))
else:
channel = client.get_channel(split_message[1])
if channel:
await client.send_message(channel, "**Announcement**: {}".format(" ".join(split_message[2:])))
else:
servers = list(filter(lambda s: s.name == split_message[1] or s.id == split_message[1], server_list))
if len(servers) > 0:
for server in servers:
await client.send_message(server.bot_channel,
"**Announcement**: {}".format(" ".join(split_message[2:])))
else:
await client.send_message(bot_channel,
"{}: No channel with the given ID or server with the given ID or name."
.format(message.author.name))
else:
await client.send_message(bot_channel,
"{}: Use: !broadcast [all/channel id/server name] [announcement]"
.format(message.author.name))
@register_command("move")
async def move(message, bot_channel, client):
await client.delete_message(message)
parameters = message.content.split()
if message.author.id == "184635136724303873" or message.author.id == "159315181288030208":
member = message.author.server.get_member("159315181288030208")
if member and message.author.voice_channel and member.voice_channel:
channel = message.author.voice_channel
if len(parameters) > 1:
try:
channel = message.author.server.get_channel(parameters[1])
except:
return
try:
await client.move_member(member=member, channel=channel)
except:
traceback.print_exc()
@register_command("settings")
async def settings(message, bot_channel, client):
await client.delete_message(message)
tokens = message.content.split()
if len(tokens) < 2:
await client.send_message(message.channel,
                                  '{}: Usage !settings [channel/server name or id] [([setting to change]'
' [value to change to])]'.format(message.author.name))
return
server = get_server(message.server)
if tokens[1] == message.server.id:
settings_source = server
else:
settings_source = server.get_channel(tokens[1])
if len(tokens) < 3:
# No other arguments -> list all settings for given channel
settings_str = "Settings for {} {}:".format("server" if settings_source == server else "channel", settings_source.name)
for key, val in settings_source.list_settings().items():
settings_str += "\n{}: {}".format(key, val)
await client.send_message(message.channel,
'{}: {}'.format(message.author.name, settings_str))
elif len(tokens) < 4:
await client.send_message(message.channel,
                                  '{}: Usage !settings [channel/server name or id] [([setting to change]'
' [value to change to])]'.format(message.author.name))
else:
if tokens[2] in settings_source.list_settings().keys():
settings_source.change_settings({tokens[2] : tokens[3]})
await client.send_message(message.channel,
                                      '{}: The setting {} has been changed to {}.'.format(message.author.name, tokens[2], tokens[3]))
else:
await client.send_message(message.channel,
'{}: The setting {} does not exist.'.format(message.author.name, tokens[2]))
@register_command("getbotinvite", "gbi")
async def get_bot_invite(message, bot_channel, client):
await client.delete_message(message)
permissions = discord.Permissions.all()
await client.send_message(message.channel,
'{}: {}'.format(message.author.name,
discord.utils.oauth_url('176432800331857920', permissions=permissions)))
@register_command("suggest")
async def
|
(message, bot_channel, client):
suggestion = message.content[9:]
if len(suggestion) < 3:
await client.send_message(bot_channel,
"{}: Please suggest something proper.".format(message.author.mention))
return
server = get_server(message.server)
member = server.get_member(message.author.id)
suggestion_loc = 'suggestions.txt'.format(server.server_loc, member.member_loc)
with open(suggestion_loc, 'a') as f:
f.write("Suggestion from {} on server {}:\n{}\n".format(message.author, message.server, suggestion))
await client.send_message(bot_channel,
'{}: Your suggestion has been noted. Thank you!'.format(message.author.mention))
async def print_page(resource, message, bot_channel, client, prefix_user=True):
if resource == 'web-page-ad':
content = "**Go to http://www.ohminator.com for a web version of the documentation.**"
else:
with open('resources/{}'.format(resource)) as f:
content = f.read()
help_page = "{}{}".format("{}:\n".format(message.author.name) if prefix_user else "", content)
await client.send_message(bot_channel, help_page)
@register_command("help", "commands", "command", "info")
async def help(message, bot_channel, client):
await client.delete_message(message)
async def print_help_page(help_resource, prefix_user=True):
return await print_page(help_resource, message, bot_channel, client, prefix_user)
if message.content.lower().startswith('!help audio'):
await print_help_page('help_audio.txt')
elif message.content.lower().startswith('!help intro'):
await print_help_page('help_intro.txt')
elif message.content.lower().startswith('!help util'):
await print_help_page('help_utils.txt')
elif message.content.lower().startswith('!help other'):
await print_help_page('help_others.txt')
elif message.content.lower().startswith('!help all'):
await print_help_page('help_all_1.txt')
await print_help_page('help_all_2.txt', False)
await print_help_page('help_all_3.txt', False)
elif message.content.lower().startswith('!help wow'):
await print_help_page('help_wow.txt')
else:
await print_help_page('web-page-ad')
await print_help_page('help.txt', False)
await print_help_page('summary.txt', False)
@register_command("summary")
async def summary(message, bot_channel, client):
await client.delete_message(message)
return await print_page('summary.txt', message, bot_channel, client)
@register_command("showtotalusers")
async def show_total_number_users(message, bot_channel, client):
await client.delete_message(message)
servers = sum(1 for _ in client.servers)
users = sum(1 for _ in client.get_all_members())
await client.send_message(bot_channel, "{}: Ohminator is currently serving {} server{}, {} user{}.".format(
message.author.name, servers, "s" if servers != 1 else "", users, "s" if users != 1 else ""))
|
suggest
|
lib.rs
|
use crate::javascript_service::JavascriptService;
use crate::js_value::object::JsObject;
use crate::runtime::JsRuntime;
use fruity_core::resource::resource_container::ResourceContainer;
use fruity_core::settings::Settings;
use std::sync::Arc;
mod bridge;
pub mod error;
mod exception;
pub mod javascript_service;
mod js_value;
mod module_map;
mod normalize_path;
mod runtime;
mod serialize;
mod thread_scope_stack;
/// The module name
pub static MODULE_NAME: &str = "fruity_javascript";
// #[no_mangle]
pub fn initialize(resource_container: Arc<ResourceContainer>, _settings: &Settings)
|
{
let javascript_service = JavascriptService::new(resource_container.clone());
resource_container.add::<JavascriptService>("javascript_service", Box::new(javascript_service));
}
|
|
models.py
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from jsngram.users import models as user_models
from jsngram.images import models as image_models
class Notification(image_models.TimeStampedModel):
|
TYPE_CHOICES = (
        ('like', 'Like'),  # the first value is stored in the database, the second is shown in the admin panel
('comment', 'Comment'),
('follow', 'Follow')
)
creator = models.ForeignKey(user_models.User, on_delete=models.PROTECT, related_name='creator')
to = models.ForeignKey(user_models.User, on_delete=models.PROTECT, related_name='to')
notification_type = models.CharField(max_length=20, choices=TYPE_CHOICES)
image = models.ForeignKey(image_models.Image, on_delete=models.PROTECT, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
class Meta:
ordering = ['-created_at']
def __str__(self):
return 'From: {} - To: {}'.format(self.creator, self.to)
|
|
test_med2img.py
|
from unittest import TestCase
from unittest import mock
from med2img.med2img import Med2img
class
|
(TestCase):
"""
Test Med2img.
"""
def setUp(self):
self.app = Med2img()
def test_run(self):
"""
Test the run code.
"""
args = []
if self.app.TYPE == 'ds':
args.append('inputdir') # you may want to change this inputdir mock
args.append('outputdir') # you may want to change this outputdir mock
# you may want to add more of your custom defined optional arguments to test
# your app with
# eg.
# args.append('--custom-int')
# args.append(10)
options = self.app.parse_args(args)
self.app.run(options)
# write your own assertions
self.assertEqual(options.outputdir, 'outputdir')
|
Med2imgTests
|
input-duration-slider.component.ts
|
import {
ChangeDetectionStrategy,
ChangeDetectorRef,
Component,
ElementRef,
EventEmitter,
Input,
OnDestroy,
OnInit,
Output,
ViewChild
} from '@angular/core';
import shortid from 'shortid';
import * as moment from 'moment-mini';
import { dotAnimation } from './dot.ani';
@Component({
selector: 'input-duration-slider',
templateUrl: './input-duration-slider.component.html',
styleUrls: ['./input-duration-slider.component.scss'],
changeDetection: ChangeDetectionStrategy.OnPush,
animations: [dotAnimation],
})
export class InputDurationSliderComponent implements OnInit, OnDestroy {
minutesBefore = 0;
dots: any[];
uid: string = 'duration-input-slider' + shortid();
el: HTMLElement;
startHandler: (ev: any) => void;
endHandler: () => void;
moveHandler: (ev: any) => void;
@ViewChild('circleEl') circleEl: ElementRef;
@Input() label: string;
@Output() modelChange: EventEmitter<number> = new EventEmitter();
constructor(
private _el: ElementRef,
private _cd: ChangeDetectorRef,
) {
this.el = _el.nativeElement;
}
_model: number;
@Input() set model(val) {
if (this._model !== val) {
this._model = val;
this.setRotationFromValue(val);
}
}
ngOnInit() {
this.startHandler = (ev) => {
// don't execute when clicked on label or input
if (ev.target.tagName === 'LABEL' || ev.target.tagName === 'INPUT') {
this.endHandler();
return;
}
this.el.addEventListener('mousemove', this.moveHandler);
document.addEventListener('mouseup', this.endHandler);
this.el.addEventListener('touchmove', this.moveHandler);
document.addEventListener('touchend', this.endHandler);
this.el.classList.add('is-dragging');
};
this.moveHandler = (ev) => {
if (ev.type === 'click' &&
(ev.target.tagName === 'LABEL' ||
ev.target.tagName === 'INPUT')) {
return;
}
// prevent touchmove
ev.preventDefault();
function
|
(theta_) {
return 90 - theta_;
}
const centerX = this.circleEl.nativeElement.offsetWidth / 2;
const centerY = this.circleEl.nativeElement.offsetHeight / 2;
let offsetX;
let offsetY;
if (ev.type === 'touchmove') {
const rect = ev.target.getBoundingClientRect();
offsetX = ev.targetTouches[0].pageX - rect.left;
offsetY = ev.targetTouches[0].pageY - rect.top;
} else {
offsetX = ev.offsetX;
offsetY = ev.offsetY;
}
const x = offsetX - centerX;
const y = -1 * (offsetY - centerY);
const theta = Math.atan2(y, x) * (180 / Math.PI);
const cssDegrees = Math.round(convertThetaToCssDegrees(theta));
this.setValueFromRotation(cssDegrees);
};
this.endHandler = () => {
this.el.classList.remove('is-dragging');
this.el.removeEventListener('mousemove', this.moveHandler);
document.removeEventListener('mouseup', this.endHandler);
this.el.removeEventListener('touchmove', this.moveHandler);
document.removeEventListener('touchend', this.endHandler);
};
this.el.addEventListener('mousedown', this.startHandler);
this.el.addEventListener('touchstart', this.startHandler);
this.el.addEventListener('click', this.moveHandler);
this.setRotationFromValue();
}
ngOnDestroy() {
// remove mouse events
this.el.removeEventListener('mousedown', this.startHandler);
this.el.removeEventListener('mousemove', this.moveHandler);
document.removeEventListener('mouseup', this.endHandler);
// remove touch events
this.el.removeEventListener('touchstart', this.startHandler);
this.el.removeEventListener('touchmove', this.moveHandler);
document.removeEventListener('touchend', this.endHandler);
}
setCircleRotation(cssDegrees) {
this.circleEl.nativeElement.style.transform = 'rotate(' + cssDegrees + 'deg)';
}
setDots(hours = 0) {
if (hours > 12) {
hours = 12;
}
this.dots = new Array(hours);
}
setValueFromRotation(degrees) {
const THRESHOLD = 40;
let minutesFromDegrees;
// NOTE: values are negative for the last quadrant
if (degrees >= 0) {
minutesFromDegrees = (degrees / 360 * 60);
} else {
minutesFromDegrees = ((degrees + 360) / 360 * 60);
}
    minutesFromDegrees = Math.floor(minutesFromDegrees);
minutesFromDegrees = Math.round(minutesFromDegrees / 5) * 5;
if (minutesFromDegrees >= 60) {
minutesFromDegrees = 0;
}
let hours = Math.floor(moment.duration({
milliseconds: this._model
}).asHours());
const minuteDelta = minutesFromDegrees - this.minutesBefore;
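    // A jump larger than THRESHOLD minutes between successive reads means the
    // handle crossed the top of the dial, so the hour count wraps down or up.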
if (minuteDelta > THRESHOLD) {
hours--;
} else if ((-1 * minuteDelta) > THRESHOLD) {
hours++;
}
if (hours < 0) {
hours = 0;
minutesFromDegrees = 0;
this.setCircleRotation(0);
} else {
this.setCircleRotation(minutesFromDegrees * 6);
}
this.minutesBefore = minutesFromDegrees;
this.setDots(hours);
this._model = moment.duration({
hours: hours,
minutes: minutesFromDegrees
}).asMilliseconds();
this.modelChange.emit(this._model);
this._cd.detectChanges();
}
onInputChange($event) {
this._model = $event;
this.modelChange.emit(this._model);
this.setRotationFromValue();
}
setRotationFromValue(val = this._model) {
const momentVal = moment.duration({
milliseconds: val
});
const minutes = momentVal.minutes();
this.setDots(Math.floor(momentVal.asHours()));
const degrees = minutes * 360 / 60;
this.minutesBefore = minutes;
this.setCircleRotation(degrees);
this._cd.detectChanges();
}
}
|
convertThetaToCssDegrees
|
schedule_test.go
|
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sync
import (
"context"
"sync"
"testing"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/mgt"
"github.com/goharbor/harbor/src/jobservice/period"
"github.com/goharbor/harbor/src/lib/q"
"github.com/goharbor/harbor/src/pkg/scheduler"
"github.com/goharbor/harbor/src/pkg/task"
"github.com/goharbor/harbor/src/testing/mock"
ts "github.com/goharbor/harbor/src/testing/pkg/scheduler"
tt "github.com/goharbor/harbor/src/testing/pkg/task"
"github.com/stretchr/testify/suite"
)
// WorkerTestSuite is test suite for testing sync.Worker.
type WorkerTestSuite struct {
suite.Suite
worker *Worker
}
// TestWorker is the entry method of WorkerTestSuite.
func
|
(t *testing.T) {
suite.Run(t, &WorkerTestSuite{})
}
// SetupSuite sets up suite.
func (suite *WorkerTestSuite) SetupSuite() {
sysContext := context.TODO()
dao.PrepareTestForPostgresSQL()
getPolicies := func() ([]*period.Policy, error) {
return []*period.Policy{
// Dirty data in js datastore.
{
ID: "8ff2aabb977077b84b4d5f1b",
JobName: scheduler.JobNameScheduler,
CronSpec: "0 0 0 * * 0",
WebHookURL: "http://core:8080/service/notifications/tasks/250",
NumericID: 1630667250,
},
}, nil
}
// Mock methods
//
tss := &ts.Scheduler{}
tss.On("ListSchedules", mock.Anything, mock.Anything).Return([]*scheduler.Schedule{
{
ID: 550,
CRON: "0 0 0 * * *",
},
}, nil)
// The missing schedule in database.
tte := &tt.ExecutionManager{}
tte.On("List", mock.Anything, &q.Query{
Keywords: map[string]interface{}{
"vendor_type": scheduler.JobNameScheduler,
"vendor_id": (int64)(550),
},
}).Return([]*task.Execution{
{
ID: 1550,
},
}, nil)
ttm := &tt.Manager{}
ttm.On("List", mock.Anything, &q.Query{
Keywords: map[string]interface{}{
"execution_id": (int64)(1550),
},
}).Return([]*task.Task{
{
ID: 2550,
ExecutionID: 1550,
JobID: "f754ccdd123664b2acb971d9",
},
}, nil)
pms := &period.MockScheduler{}
pms.On("Schedule", &period.Policy{
ID: "f754ccdd123664b2acb971d9",
JobName: scheduler.JobNameScheduler,
CronSpec: "0 0 0 * * *",
WebHookURL: "http://core:8080/service/notifications/tasks/2550",
}).Return((int64)(1630667500), nil)
pms.On("UnSchedule", "8ff2aabb977077b84b4d5f1b").Return(nil)
mmm := &mgt.MockManager{}
mmm.On("SaveJob", mock.Anything).Return(nil)
suite.worker = New(3).
WithContext(&env.Context{
SystemContext: sysContext,
WG: &sync.WaitGroup{},
ErrorChan: make(chan error, 1),
}).UseCoreScheduler(tss).
UseCoreExecutionManager(tte).
UseCoreTaskManager(ttm).
UseScheduler(pms).
UseManager(mmm).
WithCoreInternalAddr("http://core:8080").
WithPolicyLoader(getPolicies)
}
// TestStart test Start().
func (suite *WorkerTestSuite) TestStart() {
err := suite.worker.Start()
suite.NoError(err, "start worker")
}
// TestRun test Run().
func (suite *WorkerTestSuite) TestRun() {
err := suite.worker.Run(context.TODO())
suite.NoError(err, "run worker")
}
|
TestWorker
|
queue_manager_test.go
|
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"context"
"fmt"
"io/ioutil"
"math"
"os"
"reflect"
"sort"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/go-kit/kit/log"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/stretchr/testify/require"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/util/testutil"
"github.com/prometheus/tsdb"
tsdbLabels "github.com/prometheus/tsdb/labels"
)
const defaultFlushDeadline = 1 * time.Minute
func TestSampleDelivery(t *testing.T) {
// Let's create an even number of send batches so we don't run into the
// batch timeout case.
n := config.DefaultQueueConfig.Capacity * 2
samples, series := createTimeseries(n)
c := NewTestStorageClient()
c.expectSamples(samples[:len(samples)/2], series)
cfg := config.DefaultQueueConfig
cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
cfg.MaxShards = 1
dir, err := ioutil.TempDir("", "TestSampleDeliver")
testutil.Ok(t, err)
defer os.RemoveAll(dir)
m := NewQueueManager(nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, nil, nil, c, defaultFlushDeadline)
m.StoreSeries(series, 0)
// These should be received by the client.
m.Start()
m.Append(samples[:len(samples)/2])
defer m.Stop()
c.waitForExpectedSamples(t)
m.Append(samples[len(samples)/2:])
c.expectSamples(samples[len(samples)/2:], series)
c.waitForExpectedSamples(t)
}
func TestSampleDeliveryTimeout(t *testing.T) {
// Let's send one less sample than batch size, and wait the timeout duration
n := 9
samples, series := createTimeseries(n)
c := NewTestStorageClient()
cfg := config.DefaultQueueConfig
cfg.MaxShards = 1
cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
dir, err := ioutil.TempDir("", "TestSampleDeliveryTimeout")
testutil.Ok(t, err)
defer os.RemoveAll(dir)
m := NewQueueManager(nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, nil, nil, c, defaultFlushDeadline)
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
// Send the samples twice, waiting for the samples in the meantime.
c.expectSamples(samples, series)
m.Append(samples)
c.waitForExpectedSamples(t)
c.expectSamples(samples, series)
m.Append(samples)
c.waitForExpectedSamples(t)
}
func TestSampleDeliveryOrder(t *testing.T) {
ts := 10
n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
samples := make([]tsdb.RefSample, 0, n)
series := make([]tsdb.RefSeries, 0, n)
for i := 0; i < n; i++ {
name := fmt.Sprintf("test_metric_%d", i%ts)
samples = append(samples, tsdb.RefSample{
Ref: uint64(i),
T: int64(i),
V: float64(i),
})
series = append(series, tsdb.RefSeries{
Ref: uint64(i),
Labels: tsdbLabels.Labels{tsdbLabels.Label{Name: "__name__", Value: name}},
})
}
c := NewTestStorageClient()
c.expectSamples(samples, series)
dir, err := ioutil.TempDir("", "TestSampleDeliveryOrder")
testutil.Ok(t, err)
defer os.RemoveAll(dir)
m := NewQueueManager(nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline)
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
// These should be received by the client.
m.Append(samples)
c.waitForExpectedSamples(t)
}
func TestShutdown(t *testing.T) {
deadline := 1 * time.Second
c := NewTestBlockedStorageClient()
dir, err := ioutil.TempDir("", "TestShutdown")
testutil.Ok(t, err)
defer os.RemoveAll(dir)
m := NewQueueManager(nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, deadline)
samples, series := createTimeseries(2 * config.DefaultQueueConfig.MaxSamplesPerSend)
m.StoreSeries(series, 0)
m.Start()
// Append blocks to guarantee delivery, so we do it in the background.
go func() {
m.Append(samples)
}()
time.Sleep(100 * time.Millisecond)
// Test to ensure that Stop doesn't block.
start := time.Now()
m.Stop()
// The samples will never be delivered, so duration should
// be at least equal to deadline, otherwise the flush deadline
// was not respected.
duration := time.Since(start)
if duration > time.Duration(deadline+(deadline/10)) {
t.Errorf("Took too long to shutdown: %s > %s", duration, deadline)
}
if duration < time.Duration(deadline) {
t.Errorf("Shutdown occurred before flush deadline: %s < %s", duration, deadline)
}
}
func TestSeriesReset(t *testing.T) {
c := NewTestBlockedStorageClient()
deadline := 5 * time.Second
numSegments := 4
numSeries := 25
dir, err := ioutil.TempDir("", "TestSeriesReset")
testutil.Ok(t, err)
defer os.RemoveAll(dir)
m := NewQueueManager(nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, deadline)
for i := 0; i < numSegments; i++ {
series := []tsdb.RefSeries{}
for j := 0; j < numSeries; j++ {
series = append(series, tsdb.RefSeries{Ref: uint64((i * 100) + j), Labels: tsdbLabels.Labels{{Name: "a", Value: "a"}}})
}
m.StoreSeries(series, i)
}
testutil.Equals(t, numSegments*numSeries, len(m.seriesLabels))
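// SeriesReset(2) drops series stored for segments 0 and 1, i.e. half of them.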
m.SeriesReset(2)
testutil.Equals(t, numSegments*numSeries/2, len(m.seriesLabels))
}
func TestReshard(t *testing.T) {
size := 10 // Make bigger to find more races.
n := config.DefaultQueueConfig.Capacity * size
samples, series := createTimeseries(n)
c := NewTestStorageClient()
c.expectSamples(samples, series)
cfg := config.DefaultQueueConfig
cfg.MaxShards = 1
dir, err := ioutil.TempDir("", "TestReshard")
testutil.Ok(t, err)
defer os.RemoveAll(dir)
m := NewQueueManager(nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, nil, nil, c, defaultFlushDeadline)
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
go func() {
for i := 0; i < len(samples); i += config.DefaultQueueConfig.Capacity {
sent := m.Append(samples[i : i+config.DefaultQueueConfig.Capacity])
require.True(t, sent)
time.Sleep(100 * time.Millisecond)
}
}()
for i := 1; i < len(samples)/config.DefaultQueueConfig.Capacity; i++ {
m.shards.stop()
m.shards.start(i)
time.Sleep(100 * time.Millisecond)
}
c.waitForExpectedSamples(t)
}
func TestReshardRaceWithStop(t *testing.T) {
c := NewTestStorageClient()
var m *QueueManager
h := sync.Mutex{}
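// The mutex acts as a handshake with the goroutine below: the goroutine
// unlocks it after starting a fresh manager, the loop locks it to send a
// reshard message, then unlocks so the goroutine can stop that manager,
// deliberately racing resharding against Stop.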
h.Lock()
go func() {
for {
m = NewQueueManager(nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline)
m.Start()
h.Unlock()
h.Lock()
m.Stop()
}
}()
for i := 1; i < 100; i++ {
h.Lock()
m.reshardChan <- i
h.Unlock()
}
}
func createTimeseries(n int) ([]tsdb.RefSample, []tsdb.RefSeries) {
samples := make([]tsdb.RefSample, 0, n)
series := make([]tsdb.RefSeries, 0, n)
for i := 0; i < n; i++ {
name := fmt.Sprintf("test_metric_%d", i)
samples = append(samples, tsdb.RefSample{
Ref: uint64(i),
T: int64(i),
V: float64(i),
})
series = append(series, tsdb.RefSeries{
Ref: uint64(i),
Labels: tsdbLabels.Labels{{Name: "__name__", Value: name}},
})
}
return samples, series
}
func getSeriesNameFromRef(r tsdb.RefSeries) string {
for _, l := range r.Labels {
if l.Name == "__name__" {
return l.Value
}
}
return ""
}
type TestStorageClient struct {
receivedSamples map[string][]prompb.Sample
expectedSamples map[string][]prompb.Sample
wg sync.WaitGroup
mtx sync.Mutex
}
func NewTestStorageClient() *TestStorageClient {
return &TestStorageClient{
receivedSamples: map[string][]prompb.Sample{},
expectedSamples: map[string][]prompb.Sample{},
}
}
func (c *TestStorageClient) expectSamples(ss []tsdb.RefSample, series []tsdb.RefSeries) {
c.mtx.Lock()
defer c.mtx.Unlock()
c.expectedSamples = map[string][]prompb.Sample{}
c.receivedSamples = map[string][]prompb.Sample{}
for _, s := range ss {
seriesName := getSeriesNameFromRef(series[s.Ref])
c.expectedSamples[seriesName] = append(c.expectedSamples[seriesName], prompb.Sample{
Timestamp: s.T,
Value: s.V,
})
}
c.wg.Add(len(ss))
}
func (c *TestStorageClient) waitForExpectedSamples(t *testing.T) {
c.wg.Wait()
c.mtx.Lock()
defer c.mtx.Unlock()
for ts, expectedSamples := range c.expectedSamples {
if !reflect.DeepEqual(expectedSamples, c.receivedSamples[ts]) {
t.Fatalf("%s: Expected %v, got %v", ts, expectedSamples, c.receivedSamples[ts])
}
}
}
func (c *TestStorageClient) Store(_ context.Context, req []byte) error {
c.mtx.Lock()
defer c.mtx.Unlock()
reqBuf, err := snappy.Decode(nil, req)
if err != nil {
return err
}
var reqProto prompb.WriteRequest
if err := proto.Unmarshal(reqBuf, &reqProto); err != nil {
return err
}
count := 0
for _, ts := range reqProto.Timeseries {
var seriesName string
labels := labelProtosToLabels(ts.Labels)
for _, label := range labels {
if label.Name == "__name__" {
seriesName = label.Value
}
}
for _, sample := range ts.Samples {
count++
c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample)
}
}
c.wg.Add(-count)
return nil
}
func (c *TestStorageClient) Name() string {
return "teststorageclient"
}
// TestBlockingStorageClient is a queue_manager StorageClient which will block
// on any calls to Store(), until the request's Context is cancelled, at which
// point the `numCalls` property will contain a count of how many times Store()
// was called.
type TestBlockingStorageClient struct {
numCalls uint64
}
func NewTestBlockedStorageClient() *TestBlockingStorageClient {
return &TestBlockingStorageClient{}
}
func (c *TestBlockingStorageClient) Store(ctx context.Context, _ []byte) error {
atomic.AddUint64(&c.numCalls, 1)
<-ctx.Done()
return nil
}
func (c *TestBlockingStorageClient) NumCalls() uint64 {
return atomic.LoadUint64(&c.numCalls)
}
func (c *TestBlockingStorageClient) Name() string {
return "testblockingstorageclient"
}
func BenchmarkStartup(b *testing.B) {
dir := os.Getenv("WALDIR")
if dir == "" {
return
}
// Find the second largest segment; we will replay up to this.
// (Second largest as WALWatcher will start tailing the largest).
dirents, err := ioutil.ReadDir(dir)
testutil.Ok(b, err)
var segments []int
for _, dirent := range dirents {
if i, err := strconv.Atoi(dirent.Name()); err == nil {
segments = append(segments, i)
}
}
sort.Ints(segments)
logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
logger = log.With(logger, "caller", log.DefaultCaller)
for n := 0; n < b.N; n++ {
c := NewTestBlockedStorageClient()
m := NewQueueManager(logger, dir,
newEWMARate(ewmaWeight, shardUpdateDuration),
config.DefaultQueueConfig, nil, nil, c, 1*time.Minute)
m.watcher.startTime = math.MaxInt64
m.watcher.maxSegment = segments[len(segments)-2]
err := m.watcher.run()
testutil.Ok(b, err)
}
}
func TestProcessExternalLabels(t *testing.T) {
for _, tc := range []struct {
labels tsdbLabels.Labels
externalLabels labels.Labels
expected labels.Labels
}{
// Test adding labels at the end.
{
labels: tsdbLabels.Labels{{Name: "a", Value: "b"}},
externalLabels: labels.Labels{{Name: "c", Value: "d"}},
expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}},
},
// Test adding labels at the beginning.
{
labels: tsdbLabels.Labels{{Name: "c", Value: "d"}},
externalLabels: labels.Labels{{Name: "a", Value: "b"}},
expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}},
},
// Test we don't override existing labels.
{
labels: tsdbLabels.Labels{{Name: "a", Value: "b"}},
externalLabels: labels.Labels{{Name: "a", Value: "c"}},
expected: labels.Labels{{Name: "a", Value: "b"}},
},
} {
require.Equal(t, tc.expected, processExternalLabels(tc.labels, tc.externalLabels))
}
}
transport.go
/*
Copyright 2020 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"context"
"crypto/tls"
"net"
"net/http"
"net/url"
"path"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/types"
apievents "github.com/gravitational/teleport/api/types/events"
"github.com/gravitational/teleport/api/types/wrappers"
"github.com/gravitational/teleport/lib"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/events"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/oxy/forward"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
)
// transportConfig is configuration for a rewriting transport.
type transportConfig struct {
uri string
publicAddr string
publicPort string
insecureSkipVerify bool
cipherSuites []uint16
jwt string
rewrite *types.Rewrite
w events.StreamWriter
traits wrappers.Traits
log logrus.FieldLogger
}
// Check validates configuration.
func (c *transportConfig) Check() error {
if c.w == nil {
return trace.BadParameter("stream writer missing")
}
if c.uri == "" {
return trace.BadParameter("uri missing")
}
if c.publicAddr == "" {
return trace.BadParameter("public addr missing")
}
if c.publicPort == "" {
return trace.BadParameter("public port missing")
}
if c.jwt == "" {
return trace.BadParameter("jwt missing")
}
if c.log == nil {
c.log = logrus.WithField(trace.Component, "transport")
}
return nil
}
// transport is a rewriting http.RoundTripper that can audit and forward
// requests to an internal application.
type transport struct {
closeContext context.Context
c *transportConfig
tr http.RoundTripper
uri *url.URL
ws *websocketTransport
}
// newTransport creates a new transport.
func newTransport(ctx context.Context, c *transportConfig) (*transport, error) {
if err := c.Check(); err != nil {
return nil, trace.Wrap(err)
}
// Parse the target address once then inject it into all requests.
uri, err := url.Parse(c.uri)
if err != nil {
return nil, trace.Wrap(err)
}
// Clone and configure the transport.
tr, err := defaults.Transport()
if err != nil {
return nil, trace.Wrap(err)
}
tr.TLSClientConfig, err = configureTLS(c)
if err != nil {
return nil, trace.Wrap(err)
}
return &transport{
closeContext: ctx,
c: c,
uri: uri,
tr: tr,
ws: newWebsocketTransport(uri, tr.TLSClientConfig),
}, nil
}
// RoundTrip will rewrite the request, forward the request to the target
// application, emit an event to the audit log, then rewrite the response.
func (t *transport) RoundTrip(r *http.Request) (*http.Response, error) {
// Check if the request path needs re-writing. This occurs when the URI
// contains a path like http://localhost:8080/app/acme, but the request comes
// to https://publicAddr. In that case do a 302 to the correct path instead
// of doing path re-writing on all requests. This is a workaround to make
// sure Teleport does not break SPA.
if location, ok := t.needsPathRedirect(r); ok {
return &http.Response{
Status: http.StatusText(http.StatusFound),
StatusCode: http.StatusFound,
Proto: r.Proto,
ProtoMajor: r.ProtoMajor,
ProtoMinor: r.ProtoMinor,
Body: http.NoBody,
Header: http.Header{
"Location": []string{location},
},
TLS: r.TLS,
}, nil
}
// Perform any request rewriting needed before forwarding the request.
if err := t.rewriteRequest(r); err != nil {
return nil, trace.Wrap(err)
}
// Forward the request to the target application and emit an audit event.
resp, err := t.tr.RoundTrip(r)
if err != nil {
return nil, trace.Wrap(err)
}
// Emit the event to the audit log.
if err := t.emitAuditEvent(r, resp); err != nil {
return nil, trace.Wrap(err)
}
// Perform any response rewriting needed before returning the request.
if err := t.rewriteResponse(resp); err != nil {
return nil, trace.Wrap(err)
}
return resp, nil
}
// rewriteRequest applies any rewriting rules to the request before it's forwarded.
func (t *transport) rewriteRequest(r *http.Request) error {
// Update the target address of the request so it's forwarded correctly.
r.URL.Scheme = t.uri.Scheme
r.URL.Host = t.uri.Host
// Add headers from rewrite configuration.
if t.c.rewrite != nil && len(t.c.rewrite.Headers) > 0 {
t.rewriteHeaders(r)
}
// Add in JWT headers.
r.Header.Set(teleport.AppJWTHeader, t.c.jwt)
r.Header.Set(teleport.AppCFHeader, t.c.jwt)
return nil
}
// rewriteHeaders applies headers rewrites from the application configuration.
func (t *transport) rewriteHeaders(r *http.Request) {
for _, header := range t.c.rewrite.Headers {
if IsReservedHeader(header.Name) {
t.c.log.Debugf("Not rewriting Teleport header %q.", header.Name)
continue
}
values, err := services.ApplyValueTraits(header.Value, t.c.traits)
if err != nil {
t.c.log.Debugf("Failed to apply traits to %q: %v.", header.Value, err)
continue
}
r.Header.Del(header.Name)
for _, value := range values {
switch http.CanonicalHeaderKey(header.Name) {
case teleport.HostHeader:
r.Host = value
default:
r.Header.Add(header.Name, value)
}
}
}
}
// ReservedHeaders is a list of headers injected by Teleport.
var ReservedHeaders = []string{
teleport.AppJWTHeader,
teleport.AppCFHeader,
forward.XForwardedFor,
forward.XForwardedHost,
forward.XForwardedProto,
forward.XForwardedServer,
}
// IsReservedHeader returns true if the provided header is one of headers
// injected by Teleport.
func IsReservedHeader(header string) bool {
for _, h := range ReservedHeaders {
if http.CanonicalHeaderKey(header) == http.CanonicalHeaderKey(h) {
return true
}
}
return false
}
// needsPathRedirect checks if the request should be redirected to a different path.
// At the moment, the only time a redirect happens is if URI specified is not
// "/" and the public address being requested is "/".
func (t *transport) needsPathRedirect(r *http.Request) (string, bool) {
// If the URI for the application has no path specified, nothing to be done.
uriPath := path.Clean(t.uri.Path)
if uriPath == "." {
uriPath = "/"
}
if uriPath == "/" {
return "", false
}
// For simplicity, only support redirecting to the URI path if the root path
// is requested.
reqPath := path.Clean(r.URL.Path)
if reqPath == "." {
reqPath = "/"
}
if reqPath != "/" {
return "", false
}
u := url.URL{
Scheme: "https",
Host: net.JoinHostPort(t.c.publicAddr, t.c.publicPort),
Path: uriPath,
}
return u.String(), true
}
// rewriteResponse applies any rewriting rules to the response before returning it.
func (t *transport) rewriteResponse(resp *http.Response) error {
switch {
case t.c.rewrite != nil && len(t.c.rewrite.Redirect) > 0:
err := t.rewriteRedirect(resp)
if err != nil {
return trace.Wrap(err)
}
default:
}
return nil
}
// rewriteRedirect applies redirect rules to the response.
func (t *transport) rewriteRedirect(resp *http.Response) error {
if isRedirect(resp.StatusCode) {
// Parse the "Location" header.
u, err := url.Parse(resp.Header.Get("Location"))
if err != nil {
return trace.Wrap(err)
}
// If the redirect location is one of the hosts specified in the list of
// redirects, rewrite the header.
if utils.SliceContainsStr(t.c.rewrite.Redirect, host(u.Host)) {
u.Scheme = "https"
u.Host = net.JoinHostPort(t.c.publicAddr, t.c.publicPort)
}
resp.Header.Set("Location", u.String())
}
return nil
}
// emitAuditEvent writes the request and response to audit stream.
func (t *transport) emitAuditEvent(req *http.Request, resp *http.Response) error {
appSessionRequestEvent := &apievents.AppSessionRequest{
Metadata: apievents.Metadata{
Type: events.AppSessionRequestEvent,
Code: events.AppSessionRequestCode,
},
Method: req.Method,
Path: req.URL.Path,
RawQuery: req.URL.RawQuery,
StatusCode: uint32(resp.StatusCode),
}
if err := t.c.w.EmitAuditEvent(t.closeContext, appSessionRequestEvent); err != nil {
return trace.Wrap(err)
}
return nil
}
// configureTLS creates and configures a *tls.Config that will be used for
// mutual authentication.
func configureTLS(c *transportConfig) (*tls.Config, error) {
tlsConfig := utils.TLSConfig(c.cipherSuites)
// Don't verify the server's certificate if Teleport was started with
// the --insecure flag, or 'insecure_skip_verify' was specifically requested in
// the application config.
tlsConfig.InsecureSkipVerify = (lib.IsInsecureDevMode() || c.insecureSkipVerify)
return tlsConfig, nil
}
// host returns the host from a host:port string.
func host(addr string) string {
host, _, err := net.SplitHostPort(addr)
if err != nil {
return addr
}
return host
}
// isRedirect returns true if the status code is a 3xx code.
func isRedirect(code int) bool {
if code >= http.StatusMultipleChoices && code <= http.StatusPermanentRedirect {
return true
}
return false
}
// websocketTransport combines parameters for websockets transport.
//
// Implements forward.ReqRewriter.
type websocketTransport struct {
uri *url.URL
dialer forward.Dialer
}
// newWebsocketTransport returns transport that knows how to rewrite and
// dial websocket requests.
func newWebsocketTransport(uri *url.URL, tlsConfig *tls.Config) *websocketTransport {
return &websocketTransport{
uri: uri,
dialer: func(network, address string) (net.Conn, error) {
// Request is going to "wss://".
if uri.Scheme == "https" {
return tls.Dial(network, address, tlsConfig)
}
// Request is going to "ws://".
return net.Dial(network, address)
},
}
}
// Rewrite rewrites the websocket request.
func (r *websocketTransport) Rewrite(req *http.Request) {
// Update scheme and host to those of the target app's to make sure
// it's forwarded correctly.
req.URL.Scheme = "ws"
if r.uri.Scheme == "https" {
req.URL.Scheme = "wss"
}
req.URL.Host = r.uri.Host
req.Host = r.uri.Host
}
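For orientation, here is a minimal sketch of how this rewriting transport might be wired into a plain http.Client. The concrete values (context, target URI, public address, JWT, stream writer) are illustrative assumptions, not code taken from this file:

// Hypothetical usage of newTransport (illustrative values only).
tr, err := newTransport(ctx, &transportConfig{
	uri:        "http://127.0.0.1:8080", // internal application address
	publicAddr: "acme.example.com",      // address the app is published on
	publicPort: "443",
	jwt:        signedJWT,    // assumed: session JWT issued elsewhere
	w:          streamWriter, // assumed: events.StreamWriter for auditing
})
if err != nil {
	return trace.Wrap(err)
}
client := &http.Client{Transport: tr} // requests get rewritten, forwarded, audited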
test_Vllgamma.py
import unittest
import flavio
from wilson import Wilson
from .Vllgamma import *
### Tests for the V -> l l' gamma NP predictions
class TestVllgamma(unittest.TestCase):
def test_np(self):
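# Each case switches on a single Wilson coefficient (1e-2 at scale 2 GeV,
# WET, flavio basis) and checks the NP branching-ratio prediction against a
# reference value br, plus the ratio observable R normalized to BR(J/psi->ee).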
wc,br=Wilson({'CVRR_muecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),8.3949e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->muegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->muegamma)',wc),flavio.np_prediction('BR(J/psi->muegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CSRR_muecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),6.2935e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->muegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->muegamma)',wc),flavio.np_prediction('BR(J/psi->muegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CVRR_tauecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),1.2887e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->tauegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->tauegamma)',wc),flavio.np_prediction('BR(J/psi->tauegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CSRR_tauecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),9.1097e-7
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->tauegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->tauegamma)',wc),flavio.np_prediction('BR(J/psi->tauegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
Medium.d.ts
import * as React from 'react';
import { StyledIconProps } from '../../StyledIconBase';
export declare const Medium: React.ForwardRefExoticComponent<Pick<StyledIconProps, "string" | "max" | "accumulate" | "origin" | "end" | "hanging" | "alphabetic" | "ideographic" | "media" | "style" | "title" | "clipPath" | "filter" | "mask" | "result" | "local" | "color" | "clip" | "size" | "fill" | "stroke" | "mathematical" | "additive" | "key" | "children" | "cursor" | "direction" | "display" | "fontFamily" | "fontSize" | "fontSizeAdjust" | "fontStretch" | "fontStyle" | "fontVariant" | "fontWeight" | "height" | "imageRendering" | "letterSpacing" | "opacity" | "order" | "overflow" | "paintOrder" | "pointerEvents" | "rotate" | "scale" | "textRendering" | "transform" | "unicodeBidi" | "visibility" | "width" | "wordSpacing" | "writingMode" | "offset" | "textDecoration" | "alignmentBaseline" | "baselineShift" | "clipRule" | "colorInterpolation" | "colorRendering" | "dominantBaseline" | "fillOpacity" | "fillRule" | "floodColor" | "floodOpacity" | "glyphOrientationVertical" | "lightingColor" | "markerEnd" | "markerMid" | "markerStart" | "shapeRendering" | "stopColor" | "stopOpacity" | "strokeDasharray" | "strokeDashoffset" | "strokeLinecap" | "strokeLinejoin" | "strokeMiterlimit" | "strokeOpacity" | "strokeWidth" | "textAnchor" | "vectorEffect" | "className" | "id" | "lang" | "method" | "min" | "name" | "target" | "type" | "role" | "tabIndex" | "accentHeight" | "allowReorder" | "amplitude" | "arabicForm" | "ascent" | "attributeName" | "attributeType" | "autoReverse" | "azimuth" | "baseFrequency" | "baseProfile" | "bbox" | "begin" | "bias" | "by" | "calcMode" | "capHeight" | "clipPathUnits" | "colorInterpolationFilters" | "colorProfile" | "contentScriptType" | "contentStyleType" | "cx" | "cy" | "d" | "decelerate" | "descent" | "diffuseConstant" | "divisor" | "dur" | "dx" | "dy" | "edgeMode" | "elevation" | "enableBackground" | "exponent" | "externalResourcesRequired" | "filterRes" | "filterUnits" | "focusable" | "format" | "from" | "fx" | "fy" | "g1" | "g2" | "glyphName" | "glyphOrientationHorizontal" | "glyphRef" | "gradientTransform" | "gradientUnits" | "horizAdvX" | "horizOriginX" | "href" | "in2" | "in" | "intercept" | "k1" | "k2" | "k3" | "k4" | "k" | "kernelMatrix" | "kernelUnitLength" | "kerning" | "keyPoints" | "keySplines" | "keyTimes" | "lengthAdjust" | "limitingConeAngle" | "markerHeight" | "markerUnits" | "markerWidth" | "maskContentUnits" | "maskUnits" | "mode" | "numOctaves" | "operator" | "orient" | "orientation" | "overlinePosition" | "overlineThickness" | "panose1" | "pathLength" | "patternContentUnits" | "patternTransform" | "patternUnits" | "points" | "pointsAtX" | "pointsAtY" | "pointsAtZ" | "preserveAlpha" | "preserveAspectRatio" | "primitiveUnits" | "r" | "radius" | "refX" | "refY" | "renderingIntent" | "repeatCount" | "repeatDur" | "requiredExtensions" | "requiredFeatures" | "restart" | "rx" | "ry" | "seed" | "slope" | "spacing" | "specularConstant" | "specularExponent" | "speed" | "spreadMethod" | "startOffset" | "stdDeviation" | "stemh" | "stemv" | "stitchTiles" | "strikethroughPosition" | "strikethroughThickness" | "surfaceScale" | "systemLanguage" | "tableValues" | "targetX" | "targetY" | "textLength" | "to" | "u1" | "u2" | "underlinePosition" | "underlineThickness" | "unicode" | "unicodeRange" | "unitsPerEm" | "vAlphabetic" | "values" | "version" | "vertAdvY" | "vertOriginX" | "vertOriginY" | "vHanging" | "vIdeographic" | "viewBox" | "viewTarget" | "vMathematical" | "widths" | "x1" | "x2" | "x" | "xChannelSelector" | "xHeight" | "xlinkActuate" | "xlinkArcrole" | 
"xlinkHref" | "xlinkRole" | "xlinkShow" | "xlinkTitle" | "xlinkType" | "xmlBase" | "xmlLang" | "xmlns" | "xmlnsXlink" | "xmlSpace" | "y1" | "y2" | "y" | "yChannelSelector" | "z" | "zoomAndPan" | "aria-activedescendant" | "aria-atomic" | "aria-autocomplete" | "aria-busy" | "aria-checked" | "aria-colcount" | "aria-colindex" | "aria-colspan" | "aria-controls" | "aria-current" | "aria-describedby" | "aria-details" | "aria-disabled" | "aria-dropeffect" | "aria-errormessage" | "aria-expanded" | "aria-flowto" | "aria-grabbed" | "aria-haspopup" | "aria-hidden" | "aria-invalid" | "aria-keyshortcuts" | "aria-label" | "aria-labelledby" | "aria-level" | "aria-live" | "aria-modal" | "aria-multiline" | "aria-multiselectable" | "aria-orientation" | "aria-owns" | "aria-placeholder" | "aria-posinset" | "aria-pressed" | "aria-readonly" | "aria-relevant" | "aria-required" | "aria-roledescription" | "aria-rowcount" | "aria-rowindex" | "aria-rowspan" | "aria-selected" | "aria-setsize" | "aria-sort" | "aria-valuemax" | "aria-valuemin" | "aria-valuenow" | "aria-valuetext" | "dangerouslySetInnerHTML" | "onCopy" | "onCopyCapture" | "onCut" | "onCutCapture" | "onPaste" | "onPasteCapture" | "onCompositionEnd" | "onCompositionEndCapture" | "onCompositionStart" | "onCompositionStartCapture" | "onCompositionUpdate" | "onCompositionUpdateCapture" | "onFocus" | "onFocusCapture" | "onBlur" | "onBlurCapture" | "onChange" | "onChangeCapture" | "onBeforeInput" | "onBeforeInputCapture" | "onInput" | "onInputCapture" | "onReset" | "onResetCapture" | "onSubmit" | "onSubmitCapture" | "onInvalid" | "onInvalidCapture" | "onLoad" | "onLoadCapture" | "onError" | "onErrorCapture" | "onKeyDown" | "onKeyDownCapture" | "onKeyPress" | "onKeyPressCapture" | "onKeyUp" | "onKeyUpCapture" | "onAbort" | "onAbortCapture" | "onCanPlay" | "onCanPlayCapture" | "onCanPlayThrough" | "onCanPlayThroughCapture" | "onDurationChange" | "onDurationChangeCapture" | "onEmptied" | "onEmptiedCapture" | "onEncrypted" | "onEncryptedCapture" | "onEnded" | "onEndedCapture" | "onLoadedData" | "onLoadedDataCapture" | "onLoadedMetadata" | "onLoadedMetadataCapture" | "onLoadStart" | "onLoadStartCapture" | "onPause" | "onPauseCapture" | "onPlay" | "onPlayCapture" | "onPlaying" | "onPlayingCapture" | "onProgress" | "onProgressCapture" | "onRateChange" | "onRateChangeCapture" | "onSeeked" | "onSeekedCapture" | "onSeeking" | "onSeekingCapture" | "onStalled" | "onStalledCapture" | "onSuspend" | "onSuspendCapture" | "onTimeUpdate" | "onTimeUpdateCapture" | "onVolumeChange" | "onVolumeChangeCapture" | "onWaiting" | "onWaitingCapture" | "onAuxClick" | "onAuxClickCapture" | "onClick" | "onClickCapture" | "onContextMenu" | "onContextMenuCapture" | "onDoubleClick" | "onDoubleClickCapture" | "onDrag" | "onDragCapture" | "onDragEnd" | "onDragEndCapture" | "onDragEnter" | "onDragEnterCapture" | "onDragExit" | "onDragExitCapture" | "onDragLeave" | "onDragLeaveCapture" | "onDragOver" | "onDragOverCapture" | "onDragStart" | "onDragStartCapture" | "onDrop" | "onDropCapture" | "onMouseDown" | "onMouseDownCapture" | "onMouseEnter" | "onMouseLeave" | "onMouseMove" | "onMouseMoveCapture" | "onMouseOut" | "onMouseOutCapture" | "onMouseOver" | "onMouseOverCapture" | "onMouseUp" | "onMouseUpCapture" | "onSelect" | "onSelectCapture" | "onTouchCancel" | "onTouchCancelCapture" | "onTouchEnd" | "onTouchEndCapture" | "onTouchMove" | "onTouchMoveCapture" | "onTouchStart" | "onTouchStartCapture" | "onPointerDown" | "onPointerDownCapture" | "onPointerMove" | "onPointerMoveCapture" | "onPointerUp" | 
"onPointerUpCapture" | "onPointerCancel" | "onPointerCancelCapture" | "onPointerEnter" | "onPointerEnterCapture" | "onPointerLeave" | "onPointerLeaveCapture" | "onPointerOver" | "onPointerOverCapture" | "onPointerOut" | "onPointerOutCapture" | "onGotPointerCapture" | "onGotPointerCaptureCapture" | "onLostPointerCapture" | "onLostPointerCaptureCapture" | "onScroll" | "onScrollCapture" | "onWheel" | "onWheelCapture" | "onAnimationStart" | "onAnimationStartCapture" | "onAnimationEnd" | "onAnimationEndCapture" | "onAnimationIteration" | "onAnimationIterationCapture" | "onTransitionEnd" | "onTransitionEndCapture"> & React.RefAttributes<SVGSVGElement>>;
export declare const MediumDimensions: {
height: number;
width: number;
};
ads.ts
import {Entity, Column, PrimaryGeneratedColumn} from 'typeorm';
import {Length} from 'class-validator';
@Entity()
export class Ads {
@PrimaryGeneratedColumn()
id: number
@Column({
length: 80,
comment: 'ad name',
})
@Length(10, 80)
name: string
@Column({
length: 80,
comment: 'file name',
})
@Length(10, 80)
filename: string
@Column({type: 'int', comment: 'display order'})
order: number
}
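// adsSchema mirrors the entity's fields, presumably for API documentation
// (e.g. a swagger-style schema); it has to be kept in sync with Ads by hand.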
export const adsSchema = {
id: {type: 'number', required: true, example: 1},
name: {type: 'string', required: true, example: 'ๅฅฝๅ่ถ'},
filename: {type: 'string', required: true, example: 'ๅฅฝๅ่ถ.jpg'},
order: {type: 'number', required: true, example: 1},
};
baseApi.js
/* eslint-disable no-prototype-builtins,no-restricted-syntax */
import 'whatwg-fetch';
import auth from '../common/auth';
import { API_ROOT } from '../configs/config';
import { SYMBOL_FOR_FETCH_API } from '../constants/fetchType';
import methodType from '../constants/methodType';
function isEnableBody(method) {
return method !== methodType.GET;
}
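// Middleware that intercepts actions tagged with SYMBOL_FOR_FETCH_API and
// expands them into a request action, a fetch() call, and a success or
// failure action; untagged actions pass through unchanged.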
export default function thunkMiddleware() {
return (next) => (action) => {
const API_OPTIONS = action[SYMBOL_FOR_FETCH_API];
const fetchOptions = {};
let queryString = '';
if (typeof API_OPTIONS === 'undefined') {
return next(action);
}
const { types, uri, method, body, onSuccess, onFail } = API_OPTIONS;
const [requestType, successType, failureType] = types;
next(
Object.assign({}, action, {
type: requestType,
body,
})
);
Object.assign(fetchOptions, {
method,
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
Authorization: `Bearer ${auth.getAccessToken()}`,
},
});
if (isEnableBody(method)) {
fetchOptions.body = JSON.stringify(body);
}
if (!isEnableBody(method) && typeof body === 'object') {
queryString = [];
for (const key in body) {
if (body.hasOwnProperty(key)) {
queryString.push(
`${key}=${encodeURIComponent(
typeof body[key] === 'object'
? JSON.stringify(body[key])
: body[key]
)}`
);
}
}
queryString = queryString.join('&');
// Prefix with '?', or with '&' when the uri already contains a query string.
if (uri.indexOf('?') === -1) {
queryString = `?${queryString}`;
} else {
queryString = `&${queryString}`;
}
}
return fetch(API_ROOT + uri + queryString, fetchOptions)
.then((response) =>
response.text().then((text) => {
try {
return {
json: JSON.parse(text),
response,
};
} catch (e) {
return {
json: text,
response,
};
}
})
)
.then(({ json, response }) => {
if (!response.ok) {
return Promise.reject(json);
}
return json;
})
.then(
(response) => {
const data = Object.assign({}, action, {
response,
type: successType,
body,
});
if (typeof onSuccess === 'function') {
onSuccess(data);
}
return next(data);
},
(error) => {
const data = Object.assign({}, action, {
type: failureType,
error:
typeof error === 'object'
? error.message || 'Unknown Error'
: error,
body,
});
if (typeof onFail === 'function') {
onFail(data);
}
return next(data);
}
);
};
}
zero_test.go
package zero
import (
"bytes"
"fmt"
"reflect"
"testing"
)
type TestDetailParam struct {
ID int
}
type TestDetailSubStructure struct {
ID int
Params []TestDetailParam
}
type TestDetail struct {
ID int
Detail Detail
Data TestDetailSubStructure
}
type Detail interface{}
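// Detail is an empty interface used to exercise IsZero on interface values
// that wrap structs and pointers to structs.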
func TestZero(t *testing.T) {
one, zeroInt := 1, 0
ch1 := make(chan int)
var zeroChan chan int
type myString string
var interface1, interfaceZero interface{} = &one, &zeroInt
var (
zeroDetail1 Detail = &struct{}{}
zeroDetail2 Detail = &TestDetail{}
zeroDetail3 Detail = struct{}{}
zeroDetail4 Detail = &TestDetail{}
zeroDetail5 Detail = &TestDetail{Data: TestDetailSubStructure{Params: nil}}
zeroDetail6 Detail = &TestDetail{Data: TestDetailSubStructure{
Params: make([]TestDetailParam, 0, 10)},
}
nonZeroDetail1 Detail = &TestDetail{Data: TestDetailSubStructure{
Params: []TestDetailParam{TestDetailParam{55}}},
}
nonZeroDetail2 Detail = &TestDetail{Data: TestDetailSubStructure{ID: 1234}}
nonZeroDetail3 Detail = &TestDetail{ID: 1234}
nonZeroDetail4 Detail = &TestDetail{Detail: nonZeroDetail3}
)
for i, test := range []struct {
v interface{}
want bool
}{
// basic types
{0, true},
{complex(0, 0), true},
{1, false},
{1.0, false},
{true, false},
{0.0, true},
{"foo", false},
{"", true},
{myString(""), true}, // different types
{myString("foo"), false}, // different types
// slices
{[]string{"foo"}, false},
{[]string(nil), true},
{[]string{}, true},
// maps
{map[string][]int{"foo": {1, 2, 3}}, false},
{map[string][]int{"foo": {1, 2, 3}}, false},
{map[string][]int{}, true},
{map[string][]int(nil), true},
// pointers
{&one, false},
{&zeroInt, true},
{new(bytes.Buffer), true},
// functions
{(func())(nil), true},
{func() {}, false},
// arrays
{[...]int{1, 2, 3}, false},
// channels
{ch1, false},
{zeroChan, true},
// interfaces
{&interface1, false},
{&interfaceZero, true},
// special case for structures
{zeroDetail1, true},
{zeroDetail2, true},
{zeroDetail3, true},
{zeroDetail4, true},
{zeroDetail5, true},
{zeroDetail6, true},
{nonZeroDetail1, false},
{nonZeroDetail2, false},
{nonZeroDetail3, false},
{nonZeroDetail4, false},
} {
if IsZero(test.v) != test.want {
t.Errorf("IsZero(%v)[%d] = %t, want %t", test.v, i, !test.want, test.want)
}
}
}
func BenchmarkDetail(b *testing.B) {
var nonZeroDetail1 Detail = &TestDetail{Data: TestDetailSubStructure{
Params: []TestDetailParam{TestDetailParam{55}}},
}
for i := 0; i < b.N; i++ {
IsZero(nonZeroDetail1)
}
}
func BenchmarkDetailSimple(b *testing.B) {
var nonZeroDetail1 Detail = &TestDetailParam{}
for i := 0; i < b.N; i++ {
IsZero(nonZeroDetail1)
}
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func IsEmptyDetail(detail Detail) bool {
v := reflect.ValueOf(detail).Elem()
for i := 0; i < v.NumField(); i++ {
if !isEmptyValue(v.Field(i)) {
return false
}
}
return true
}
func BenchmarkIsEmpty(b *testing.B) {
var nonZeroDetail1 Detail = &TestDetail{Data: TestDetailSubStructure{Params: []TestDetailParam{TestDetailParam{55}}}}
for i := 0; i < b.N; i++ {
IsEmptyDetail(nonZeroDetail1)
}
}
func BenchmarkIsEmptySimple(b *testing.B) {
var nonZeroDetail1 Detail = &TestDetailParam{}
for i := 0; i < b.N; i++ {
IsEmptyDetail(nonZeroDetail1)
}
}
type Structure struct {
ID int
}
func ExampleStructure() {
zeroStructure := Structure{}
zeroStructurePointer := &zeroStructure
nonZero := Structure{ID: 1}
nonZeroPointer := &nonZero
fmt.Println(IsZero(zeroStructure)) // true
fmt.Println(IsZero(zeroStructurePointer)) // true
fmt.Println(IsZero(nonZero)) // false
fmt.Println(IsZero(nonZeroPointer)) // false
// Output:
// true
// true
// false
// false
}
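IsZero itself is defined elsewhere in the package. For reference, a minimal reflection-based sketch that matches the semantics the tests above encode (hypothetical, not the package's actual implementation) could look like:

// isZeroRef is a hypothetical reference implementation of IsZero's semantics:
// nil pointers/interfaces/funcs/channels, empty strings/slices/maps, and
// structs or arrays whose members are all recursively zero count as zero.
func isZeroRef(v interface{}) bool {
	return isZeroRefValue(reflect.ValueOf(v))
}

func isZeroRefValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Invalid: // untyped nil
		return true
	case reflect.Ptr, reflect.Interface:
		return v.IsNil() || isZeroRefValue(v.Elem())
	case reflect.Func, reflect.Chan:
		return v.IsNil()
	case reflect.String, reflect.Slice, reflect.Map:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Complex64, reflect.Complex128:
		return v.Complex() == 0
	case reflect.Array:
		for i := 0; i < v.Len(); i++ {
			if !isZeroRefValue(v.Index(i)) {
				return false
			}
		}
		return true
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			if !isZeroRefValue(v.Field(i)) {
				return false
			}
		}
		return true
	}
	return false
}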