code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
---|---|---|---|---|---
string | string | string | float64 | float64 | float64
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
rv = (co.co_filename, f.f_lineno, co.co_name)
break
return rv | def findCaller(self) | Find the stack frame of the caller so that we can note the source
file name, line number and function name. | 1.583944 | 1.465866 | 1.080552 |
try:
str_fmt = s
if ":" not in s:
str_fmt = '{} {}'.format(s, default_time)
dt_obj = datetime.strptime(str_fmt, "%Y-%m-%d %H:%M:%S")
return RET_OK, dt_obj
except ValueError:
error_str = ERROR_STR_PREFIX + "wrong time or time format"
return RET_ERROR, error_str | def check_date_str_format(s, default_time="00:00:00") | Check the format of date string | 3.398051 | 3.236608 | 1.04988 |
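A minimal, self-contained usage sketch of the check_date_str_format behaviour above; the RET_OK/RET_ERROR values below are illustrative stand-ins for the module's real constants, and the demo function simply mirrors the logic shown in the row.
# Hedged, standalone sketch; assumed RET_OK/RET_ERROR values for illustration only.
from datetime import datetime
RET_OK, RET_ERROR = 0, -1
def check_date_str_format_demo(s, default_time="00:00:00"):
    try:
        str_fmt = s if ":" in s else "{} {}".format(s, default_time)
        return RET_OK, datetime.strptime(str_fmt, "%Y-%m-%d %H:%M:%S")
    except ValueError:
        return RET_ERROR, "wrong time or time format"
print(check_date_str_format_demo("2019-01-02"))           # default_time appended -> (0, datetime(2019, 1, 2, 0, 0))
print(check_date_str_format_demo("2019-01-02 09:30:00"))  # full timestamp parsed directly
print(check_date_str_format_demo("not-a-date"))           # (-1, 'wrong time or time format')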
ret_code, ret_data = check_date_str_format(date_str, default_time)
if ret_code != RET_OK:
return ret_code, ret_data
return RET_OK, ret_data.strftime("%Y-%m-%d %H:%M:%S") | def normalize_date_format(date_str, default_time="00:00:00") | Normalize the format of the date string | 2.932948 | 2.682453 | 1.093383 |
try:
rsp = json.loads(rsp_str)
except ValueError:
traceback.print_exc()
err = sys.exc_info()[1]
err_str = ERROR_STR_PREFIX + str(err)
return RET_ERROR, err_str, None
error_code = int(rsp['retType'])
if error_code != 1:
error_str = ERROR_STR_PREFIX + rsp['retMsg']
return RET_ERROR, error_str, None
return RET_OK, "", rsp | def extract_pls_rsp(rsp_str) | Extract the response of PLS | 2.868207 | 2.851877 | 1.005726 |
stock_str = str(stock_str_param)
split_loc = stock_str.find(".")
'''Do not use the built-in split function in Python.
The built-in function cannot handle some stock strings correctly,
for instance US..DJI, where the dot "." itself is part of the original code.'''
if 0 <= split_loc < len(
stock_str) - 1 and stock_str[0:split_loc] in MKT_MAP:
market_str = stock_str[0:split_loc]
market_code = MKT_MAP[market_str]
partial_stock_str = stock_str[split_loc + 1:]
return RET_OK, (market_code, partial_stock_str)
else:
error_str = ERROR_STR_PREFIX + "format of %s is wrong. (US.AAPL, HK.00700, SZ.000001)" % stock_str
return RET_ERROR, error_str | def split_stock_str(stock_str_param) | split the stock string | 4.227465 | 4.144515 | 1.020014 |
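A hedged, standalone sketch of how split_stock_str treats the market prefix and the remainder of the code; the MKT_MAP values below are assumed for illustration and are not the library's real constants.
# Illustrative mapping only; the real module defines MKT_MAP and RET_OK/RET_ERROR.
MKT_MAP = {'HK': 1, 'US': 2, 'SZ': 3}
def split_stock_str_demo(stock_str):
    split_loc = stock_str.find(".")
    if 0 <= split_loc < len(stock_str) - 1 and stock_str[0:split_loc] in MKT_MAP:
        return MKT_MAP[stock_str[0:split_loc]], stock_str[split_loc + 1:]
    return None
print(split_stock_str_demo("HK.00700"))  # (1, '00700')
print(split_stock_str_demo("US..DJI"))   # (2, '.DJI') -- the extra dot stays part of the code
print(split_stock_str_demo("00700"))     # None -- no recognised market prefix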
market_str = QUOTE.REV_MKT_MAP[qot_mkt]
stock_str = '.'.join([market_str, partial_stock_str])
return stock_str | def merge_qot_mkt_stock_str(qot_mkt, partial_stock_str) | Merge the string of stocks
:param qot_mkt: market code
:param partial_stock_str: original stock code string. i.e. "AAPL","00700", "000001"
:return: unified representation of a stock code. i.e. "US.AAPL", "HK.00700", "SZ.000001" | 5.252239 | 5.109868 | 1.027862 |
mkt_qot = Market.NONE
mkt = TRADE.REV_TRD_MKT_MAP[trd_mkt] if trd_mkt in TRADE.REV_TRD_MKT_MAP else TrdMarket.NONE
if mkt == TrdMarket.HK:
mkt_qot = Market.HK
elif mkt == TrdMarket.US:
mkt_qot = Market.US
elif mkt == TrdMarket.HKCC or mkt == TrdMarket.CN:
if partial_stock_str.startswith('6') or partial_stock_str.startswith('9'):
mkt_qot = Market.SH
else:
mkt_qot = Market.SZ
else:
raise Exception("merge_trd_mkt_stock_str: unknown trd_mkt.")
return merge_qot_mkt_stock_str(MKT_MAP[mkt_qot], partial_stock_str) | def merge_trd_mkt_stock_str(trd_mkt, partial_stock_str) | Merge the string of stocks
:param trd_mkt: trade market code
:param partial_stock_str: original stock code string. i.e. "AAPL","00700", "000001"
:return: unified representation of a stock code. i.e. "US.AAPL", "HK.00700", "SZ.000001" | 2.757862 | 2.724903 | 1.012095 |
if proto_fmt_type == ProtoFMT.Json:
return b.decode('utf-8')
elif proto_fmt_type == ProtoFMT.Protobuf:
rsp = pb_map[proto_id]
if IS_PY2:
rsp.ParseFromString(str(b))
else:
rsp.ParseFromString(b)
return MessageToJson(rsp)
else:
raise Exception("binary2str: unknown proto format.") | def binary2str(b, proto_id, proto_fmt_type) | Transfer binary to string
:param b: binary content to be transformed to string
:return: string | 3.473826 | 3.593971 | 0.96657 |
rsp = pb_map[proto_id]
if rsp is None:
return None
if proto_fmt_type == ProtoFMT.Json:
return json2pb(type(rsp), b.decode('utf-8'))
elif proto_fmt_type == ProtoFMT.Protobuf:
rsp = type(rsp)()
# logger.debug((proto_id))
if IS_PY2:
rsp.ParseFromString(str(b))
else:
rsp.ParseFromString(b)
return rsp
else:
raise Exception("binary2str: unknown proto format.") | def binary2pb(b, proto_id, proto_fmt_type) | Transfer binary to pb message
:param b: binary content to be transformed to pb message
:return: pb message | 4.156219 | 4.281091 | 0.970832 |
d = t._asdict()
d.update(kwargs)
cls = type(t)
return cls(**d) | def make_from_namedtuple(t, **kwargs) | t is a namedtuple; return a copy of t with some fields updated to the values given in kwargs
:param t:
:param kwargs:
:return: | 2.992139 | 2.892589 | 1.034416 |
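A self-contained usage sketch of make_from_namedtuple; the Quote record type below is hypothetical and only serves to show the copy-with-updates behaviour.
from collections import namedtuple
def make_from_namedtuple(t, **kwargs):
    d = t._asdict()
    d.update(kwargs)
    return type(t)(**d)
Quote = namedtuple('Quote', ['code', 'price', 'volume'])  # hypothetical record type
q1 = Quote('HK.00700', 350.0, 1000)
q2 = make_from_namedtuple(q1, price=355.5)  # copy q1 with only the price field updated
print(q2)  # Quote(code='HK.00700', price=355.5, volume=1000)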
ret, content = super(TrailingStopHandler, self).on_recv_rsp(rsp_str)
if ret != ft.RET_OK:
print('StockQuote error {}'.format(content))
return ret, content
if self.finished:
return ret, content
ret, data = self.quote_ctx.get_global_state()
if ret != ft.RET_OK:
print('Failed to get global state')
trading = False
else:
hk_trading = (data['market_hk'] == ft.MarketState.MORNING or data['market_hk'] == ft.MarketState.AFTERNOON)
us_trading = (data['market_us'] == ft.MarketState.MORNING)
trading = hk_trading if self.is_hk_trade else us_trading
if not trading:
print('Not within trading hours')
return ft.RET_OK, content
last_price = content.iloc[0]['last_price']
if self.stop is None:
self.stop = last_price - self.drop if self.method == TrailingMethod.DROP_ABS else last_price * (1 - self.drop)
elif (self.stop + self.drop < last_price) if self.method == TrailingMethod.DROP_ABS else (self.stop < last_price * (1 - self.drop)):
self.stop = last_price - self.drop if self.method == TrailingMethod.DROP_ABS else last_price * (1 - self.drop)
elif self.stop >= last_price:
# The trade has been triggered
self.finished = True
print('Trade triggered')
self.price_lst.append(last_price)
self.stop_lst.append(self.stop)
print('last_price is {}, stop is {}'.format(last_price, self.stop))
return ft.RET_OK, content | def on_recv_rsp(self, rsp_str) | Data receive callback function | 3.205566 | 3.172699 | 1.010359 |
if self.unlock_password == "":
raise Exception("请先配置交易解锁密码! password: {}".format(
self.unlock_password))
quote_ctx = ft.OpenQuoteContext(
host=self.api_svr_ip, port=self.api_svr_port)
if 'HK.' in self.stock:
trade_ctx = ft.OpenHKTradeContext(
host=self.api_svr_ip, port=self.api_svr_port)
if self.trade_env == ft.TrdEnv.REAL:
ret_code, ret_data = trade_ctx.unlock_trade(
self.unlock_password)
if ret_code == ft.RET_OK:
print('Trade unlocked successfully!')
else:
raise Exception("请求交易解锁失败: {}".format(ret_data))
else:
print('Trade unlocked successfully!')
elif 'US.' in self.stock:
if self.trade_env != 0:
raise Exception("美股交易接口不支持仿真环境 trade_env: {}".format(
self.trade_env))
trade_ctx = ft.OpenUSTradeContext(
host=self.api_svr_ip, port=self.api_svr_port)
else:
raise Exception("stock输入错误 stock: {}".format(self.stock))
return quote_ctx, trade_ctx | def context_setting(self) | API trading and quote context setting
:returns: trade context, quote context | 3.072449 | 2.899055 | 1.059811 |
with self._lock:
if self._handler_ctx is not None:
return self._handler_ctx.set_handler(handler)
return RET_ERROR | def set_handler(self, handler) | Set the asynchronous callback handler object
:param handler: callback handler object; it must be an instance of a subclass of one of the following classes
=============================== =========================
Class name                      Description
=============================== =========================
StockQuoteHandlerBase           base class for quote handling
OrderBookHandlerBase            base class for order book handling
CurKlineHandlerBase             base class for real-time K-line handling
TickerHandlerBase               base class for tick-by-tick handling
RTDataHandlerBase               base class for intraday (time-sharing) data handling
BrokerHandlerBase               base class for broker queue handling
=============================== =========================
:return: RET_OK: set successfully
RET_ERROR: failed to set | 5.256451 | 4.808024 | 1.093266 |
'''set pre handler'''
with self._lock:
if self._handler_ctx is not None:
return self._handler_ctx.set_pre_handler(handler)
return RET_ERROR | def set_pre_handler(self, handler) | set pre handler | 5.031463 | 5.456008 | 0.922188 |
def sync_query_processor(**kargs):
while True:
with self._lock:
if self._status == ContextStatus.Ready:
net_mgr = self._net_mgr
conn_id = self._conn_id
break
sleep(0.01)
try:
ret_code, msg, req_str = pack_func(**kargs)
if ret_code != RET_OK:
return ret_code, msg, None
ret_code, msg, rsp_str = net_mgr.sync_query(conn_id, req_str)
if ret_code != RET_OK:
return ret_code, msg, None
ret_code, msg, content = unpack_func(rsp_str)
if ret_code != RET_OK:
return ret_code, msg, None
except Exception as e:
logger.error(traceback.format_exc())
return RET_ERROR, str(e), None
return RET_OK, msg, content
return sync_query_processor | def _get_sync_query_processor(self, pack_func, unpack_func, is_create_socket=True) | synchronize the query processor
:param pack_func: function used to pack the request
:param unpack_func: function used to unpack the response
:return: sync_query_processor | 2.401831 | 2.464469 | 0.974584 |
logger.info("Start connecting: host={}; port={};".format(self.__host, self.__port))
with self._lock:
self._status = ContextStatus.Connecting
# logger.info("try connecting: host={}; port={};".format(self.__host, self.__port))
ret, msg, conn_id = self._net_mgr.connect((self.__host, self.__port), self, 5)
if ret == RET_OK:
self._conn_id = conn_id
else:
logger.warning(msg)
if ret == RET_OK:
while True:
with self._lock:
if self._sync_req_ret is not None:
if self._sync_req_ret.ret == RET_OK:
self._status = ContextStatus.Ready
else:
ret, msg = self._sync_req_ret.ret, self._sync_req_ret.msg
self._sync_req_ret = None
break
sleep(0.01)
if ret == RET_OK:
ret, msg = self.on_api_socket_reconnected()
else:
self._wait_reconnect()
return RET_OK, '' | def _socket_reconnect_and_wait_ready(self) | sync_socket & async_socket recreate
:return: (ret, msg) | 3.308915 | 3.152822 | 1.049509 |
query_processor = self._get_sync_query_processor(
GlobalStateQuery.pack_req, GlobalStateQuery.unpack_rsp)
kargs = {
'user_id': self.get_login_user_id(),
'conn_id': self.get_sync_conn_id(),
}
ret_code, msg, state_dict = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg
return RET_OK, state_dict | def get_global_state(self) | Get the global state
:return: (ret, data)
ret == RET_OK: data is a dict describing the global state, with the following keys
ret != RET_OK: data is an error description string
===================== =========== ==============================================================
key                   value type  description
===================== =========== ==============================================================
market_sz             str         Shenzhen market state, see MarketState
market_us             str         US market state, see MarketState
market_sh             str         Shanghai market state, see MarketState
market_hk             str         Hong Kong market state, see MarketState
market_future         str         Hong Kong futures market state, see MarketState
server_ver            str         FutuOpenD version number
trd_logined           str         '1': logged in to the trade server, '0': not logged in to the trade server
qot_logined           str         '1': logged in to the quote server, '0': not logged in to the quote server
timestamp             str         current timestamp of the Futu backend server (seconds)
local_timestamp       double      current timestamp of the machine running FutuOpenD (seconds)
===================== =========== ==============================================================
:example:
.. code:: python
from futuquant import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
print(quote_ctx.get_global_state())
quote_ctx.close() | 4.118131 | 4.546037 | 0.905873 |
# Create the quote API context
quote_ctx = ft.OpenQuoteContext(host=api_svr_ip, port=api_svr_port)
stock_type = [ft.SecurityType.STOCK, ft.SecurityType.IDX, ft.SecurityType.ETF, ft.SecurityType.WARRANT,
ft.SecurityType.BOND]
stock_codes = []
# Enumerate all security types and collect the stock codes
for sub_type in stock_type:
ret_code, ret_data = quote_ctx.get_stock_basicinfo(market, sub_type)
if ret_code == 0:
print("get_stock_basicinfo: market={}, sub_type={}, count={}".format(market, sub_type, len(ret_data)))
for ix, row in ret_data.iterrows():
stock_codes.append(row['code'])
if len(stock_codes) == 0:
quote_ctx.close()
print("Error market:'{}' can not get stock info".format(market))
return
# Fetch market snapshots under the rate limit: 200 stocks every 5 seconds
for i in range(1, len(stock_codes), 200):
print("from {}, total {}".format(i, len(stock_codes)))
ret_code, ret_data = quote_ctx.get_market_snapshot(stock_codes[i:i + 200])
if ret_code != 0:
print(ret_data)
time.sleep(3)
quote_ctx.close() | def loop_get_mkt_snapshot(api_svr_ip, api_svr_port, market) | Verify the interface: fetch snapshot data of an entire market via get_mkt_snapshot
:param api_svr_ip: (string)ip
:param api_svr_port: (int)port
:param market: market type
:return: | 2.821866 | 2.79622 | 1.009171 |
symbol = self.symbol_pools[0]
now = datetime.datetime.now()
work_time = now.replace(hour=15, minute=55, second=0)
if now == work_time:
quote_ctx = OpenQuoteContext(host='172.24.31.139', port=11111)
data = tiny_bar
price = data.open
start_day = (now - datetime.timedelta(days=100)).strftime('%Y-%m-%d')
end_day = now.strftime('%Y-%m-%d')
history_result, history_kline_result = quote_ctx.get_history_kline(symbol, start=start_day, end=end_day)
result, kline_result = quote_ctx.get_history_kline(symbol, start=start_day, end=end_day, ktype='K_5M')
if history_result == 0 and result == 0 and history_kline_result.shape[0] >= 25 and kline_result.shape[0] > 0 :
close_price = kline_result[-1:]
close_price_array = history_kline_result['close']
close_price_array.append(close_price)
df = pd.DataFrame()
df['EMA12'] = talib.EMA(np.array(close_price_array), timeperiod=6)
df['EMA26'] = talib.EMA(np.array(close_price_array), timeperiod=12)
df['MACD'], df['MACDsignal'], df['MACDhist'] = talib.MACD(np.array(close_price_array), fastperiod=6, slowperiod=12, signalperiod=9)
signal = df['MACDsignal'][-1:].values[0]
if signal > 0:
self.do_trade(symbol, price, "buy")
elif signal < 0:
self.do_trade(symbol, price, "sell")
quote_ctx = OpenQuoteContext(host='172.24.31.139', port=11111) | def on_bar_min1(self, tiny_bar) | Callback triggered once per minute | 2.417292 | 2.402568 | 1.006128 |
self.count += 1
if not self.inited and self.count >= self.size:
self.inited = True
self.openArray[0:self.size - 1] = self.openArray[1:self.size]
self.highArray[0:self.size - 1] = self.highArray[1:self.size]
self.lowArray[0:self.size - 1] = self.lowArray[1:self.size]
self.closeArray[0:self.size - 1] = self.closeArray[1:self.size]
self.volumeArray[0:self.size - 1] = self.volumeArray[1:self.size]
self.openArray[-1] = bar.open
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.closeArray[-1] = bar.close
self.volumeArray[-1] = bar.volume | def updateBar(self, bar) | Update the K-line (candlestick) arrays with a new bar | 1.504265 | 1.378073 | 1.091571 |
result = talib.SMA(self.close, n)
if array:
return result
return result[-1] | def sma(self, n, array=False) | Simple moving average | 3.060747 | 2.685384 | 1.13978 |
result = talib.STDDEV(self.close, n)
if array:
return result
return result[-1] | def std(self, n, array=False) | Standard deviation | 3.691486 | 3.29056 | 1.121841 |
result = talib.CCI(self.high, self.low, self.close, n)
if array:
return result
return result[-1] | def cci(self, n, array=False) | CCI indicator | 2.572417 | 2.451144 | 1.049476 |
result = talib.ATR(self.high, self.low, self.close, n)
if array:
return result
return result[-1] | def atr(self, n, array=False) | ATR indicator | 2.688263 | 2.467123 | 1.089635 |
result = talib.RSI(self.close, n)
if array:
return result
return result[-1] | def rsi(self, n, array=False) | RSI indicator | 3.028357 | 2.836164 | 1.067765 |
macd, signal, hist = talib.MACD(self.close, fastPeriod,
slowPeriod, signalPeriod)
if array:
return macd, signal, hist
return macd[-1], signal[-1], hist[-1] | def macd(self, fastPeriod, slowPeriod, signalPeriod, array=False) | MACD indicator | 1.966629 | 1.786647 | 1.100737 |
result = talib.ADX(self.high, self.low, self.close, n)
if array:
return result
return result[-1] | def adx(self, n, array=False) | ADX indicator | 2.496696 | 2.365845 | 1.055308 |
mid = self.sma(n, array)
std = self.std(n, array)
up = mid + std * dev
down = mid - std * dev
return up, down | def boll(self, n, dev, array=False) | Bollinger Bands | 2.977208 | 2.984347 | 0.997608 |
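A standalone numpy sketch of the Bollinger computation used above (mid ± dev * std over the last n closes); it mirrors the sma/std helpers in these rows without requiring talib, and the sample closes are made-up values.
import numpy as np
def boll_demo(close, n, dev):
    window = np.asarray(close[-n:], dtype=float)
    mid = window.mean()   # simple moving average over the last n closes
    std = window.std()    # population standard deviation (ddof=0), as talib.STDDEV uses by default
    return mid + dev * std, mid - dev * std
closes = [10.0, 10.2, 10.1, 10.4, 10.3, 10.6, 10.5, 10.8, 10.7, 11.0]
up, down = boll_demo(closes, n=10, dev=2.0)
print(round(up, 4), round(down, 4))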
mid = self.sma(n, array)
atr = self.atr(n, array)
up = mid + atr * dev
down = mid - atr * dev
return up, down | def keltner(self, n, dev, array=False) | Keltner Channel | 2.920581 | 2.793446 | 1.045512 |
up = talib.MAX(self.high, n)
down = talib.MIN(self.low, n)
if array:
return up, down
return up[-1], down[-1] | def donchian(self, n, array=False) | Donchian Channel | 2.558486 | 2.424947 | 1.055069 |
ret_code, ret_data = UpdateOrderPush.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, ret_data
else:
order_dict = ret_data
col_list = ['trd_env', 'code', 'stock_name', 'dealt_avg_price', 'dealt_qty',
'qty', 'order_id', 'order_type', 'price', 'order_status',
'create_time', 'updated_time', 'trd_side', 'last_err_msg', 'trd_market',
]
trade_frame_table = pd.DataFrame([order_dict], columns=col_list)
return RET_OK, trade_frame_table | def on_recv_rsp(self, rsp_pb) | receive response callback function | 4.208801 | 4.148903 | 1.014437 |
ret_code, ret_data = UpdateDealPush.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, ret_data
else:
deal_dict = ret_data
col_list = ['trd_env', 'code', 'stock_name', 'deal_id', 'order_id',
'qty', 'price', 'trd_side', 'create_time', 'counter_broker_id',
'counter_broker_name', 'trd_market',
]
trade_frame_table = pd.DataFrame([deal_dict], columns=col_list)
return RET_OK, trade_frame_table | def on_recv_rsp(self, rsp_pb) | receive response callback function | 4.538386 | 4.532419 | 1.001317 |
ret_code, msg, _ = SubAccPush.unpack_rsp(rsp_pb)
if self._notify_obj is not None:
self._notify_obj.on_async_sub_acc_push(ret_code, msg)
return ret_code, msg | def on_recv_rsp(self, rsp_pb) | receive response callback function | 6.537687 | 6.527798 | 1.001515 |
lot_size = 0
while True:
sleep(0.1)
if lot_size == 0:
ret, data = quote_ctx.get_market_snapshot(stock_code)
lot_size = data.iloc[0]['lot_size'] if ret == ft.RET_OK else 0
if ret != ft.RET_OK:
print("can't get lot size, retrying: {}".format(data))
continue
elif lot_size <= 0:
raise Exception('lot size error {}:{}'.format(lot_size, stock_code))
qty = int(volume // lot_size) * lot_size
ret, data = trade_ctx.place_order(price=trade_price, qty=qty, code=stock_code,
trd_side=ft.TrdSide.SELL, trd_env=trade_env, order_type=order_type)
if ret != ft.RET_OK:
print('Order placement failed: {}'.format(data))
return None
else:
print('Order placed successfully')
return data | def simple_sell(quote_ctx, trade_ctx, stock_code, trade_price, volume, trade_env, order_type=ft.OrderType.SPECIAL_LIMIT) | Simple sell helper function | 2.650961 | 2.622953 | 1.010678 |
'''
such as: user.username
such as: replies.content
'''
relation = getattr(instance.__class__, fields[0]).property
_field = getattr(instance, fields[0])
if relation.lazy == 'dynamic':
_field = _field.first()
return getattr(_field, fields[1]) if _field else '' | def relation_column(instance, fields) | such as: user.username
such as: replies.content | 6.187075 | 4.119297 | 1.501974 |
'''
get index
'''
index_name = self.index_name
if hasattr(model, "__msearch_index__"):
index_name = model.__msearch_index__
name = model
if not isinstance(model, str):
name = model.__table__.name
if name not in self._indexs:
self._indexs[name] = Index(index_name, model, self.analyzer)
return self._indexs[name] | def _index(self, model) | get index | 3.409145 | 3.173796 | 1.074154 |
'''
:param instance: sqlalchemy instance object
:param update: when update is True, use `update_document`, default `False`
:param delete: when delete is True, use `delete_by_term` with id (primary key), default `False`
:param commit: when commit is True, writer would use writer.commit()
:raise: ValueError: when both update is True and delete is True
:return: instance
'''
if update and delete:
raise ValueError("update and delete can't work togther")
table = instance.__class__
ix = self._index(table)
searchable = ix.fields
attrs = {DEFAULT_PRIMARY_KEY: str(instance.id)}
for field in searchable:
if '.' in field:
attrs[field] = str(relation_column(instance, field.split('.')))
else:
attrs[field] = str(getattr(instance, field))
if delete:
logger.debug('deleting index: {}'.format(instance))
ix.delete(fieldname=DEFAULT_PRIMARY_KEY, text=str(instance.id))
elif update:
logger.debug('updating index: {}'.format(instance))
ix.update(**attrs)
else:
logger.debug('creating index: {}'.format(instance))
ix.create(**attrs)
if commit:
ix.commit()
return instance | def create_one_index(self,
instance,
update=False,
delete=False,
commit=True) | :param instance: sqlalchemy instance object
:param update: when update is True, use `update_document`, default `False`
:param delete: when delete is True, use `delete_by_term` with id (primary key), default `False`
:param commit: when commit is True, writer would use writer.commit()
:raise: ValueError: when both update is True and delete is True
:return: instance | 3.807287 | 2.31061 | 1.647741 |
'''
Set a limit to make the search faster.
'''
ix = self._index(m)
if fields is None:
fields = ix.fields
group = OrGroup if or_ else AndGroup
parser = MultifieldParser(fields, ix.schema, group=group)
return ix.search(parser.parse(query), limit=limit) | def msearch(self, m, query, fields=None, limit=None, or_=True) | Set a limit to make the search faster | 4.695549 | 3.736127 | 1.256796 |
"Update document not update index."
kw = dict(index=self.name, doc_type=self.doc_type, ignore=[404])
kw.update(**kwargs)
return self._client.update(**kw) | def update(self, **kwargs) | Update the document without updating the index. | 6.569278 | 3.8565 | 1.70343 |
'''
Elasticsearch multiple types have been removed.
Use multiple indexes unless __msearch_index__ is set.
'''
doc_type = model
if not isinstance(model, str):
doc_type = model.__table__.name
index_name = doc_type
if hasattr(model, "__msearch_index__"):
index_name = model.__msearch_index__
if doc_type not in self._indexs:
self._indexs[doc_type] = Index(self._client, index_name, doc_type)
return self._indexs[doc_type] | def _index(self, model) | Elasticsearch multiple types have been removed.
Use multiple indexes unless __msearch_index__ is set. | 4.669003 | 2.351304 | 1.985708 |
text = item.read_text(encoding='utf-8')
for lib in vendored_libs:
text = re.sub(
r'(\n\s*)import %s(\n\s*)' % lib,
r'\1from pythonfinder._vendor import %s\2' % lib,
text,
)
text = re.sub(
r'(\n\s*)from %s' % lib,
r'\1from pythonfinder._vendor.%s' % lib,
text,
)
item.write_text(text, encoding='utf-8') | def rewrite_file_imports(item, vendored_libs) | Rewrite 'import xxx' and 'from xxx import' for vendored_libs | 2.235211 | 2.164589 | 1.032626 |
weights = []
n = float(self._rot_grid_points.shape[1])
for r_gps in self._rot_grid_points:
weights.append(np.sqrt(len(np.unique(r_gps)) / n))
return weights | def _get_weights(self) | Returns weights used for collision matrix and |X> and |f>
self._rot_grid_points : ndarray
shape=(ir_grid_points, point_operations), dtype='uintp'
r_gps : grid points of arms of k-star with duplicates
len(r_gps) == order of crystallographic point group
len(unique(r_gps)) == number of arms of the k-star
Returns
-------
weights : list
sqrt(g_k)/|g|, where g is the crystallographic point group and
g_k is the number of arms of k-star. | 6.333879 | 3.376886 | 1.875657 |
r_sum = np.zeros((3, 3), dtype='double', order='C')
for r in self._rotations_cartesian:
for i in range(3):
for j in range(3):
r_sum[i, j] += r[a, i] * r[b, j]
if plus_transpose:
r_sum += r_sum.T
# Return None to avoid spending computation on diagonalization
if (np.abs(r_sum) < 1e-10).all():
return None
# Same as np.kron(np.eye(size), r_sum), but written as below
# to be sure the values in memory are C-contiguous with dtype 'double'.
I_mat = np.zeros((3 * size, 3 * size), dtype='double', order='C')
for i in range(size):
I_mat[(i * 3):((i + 1) * 3), (i * 3):((i + 1) * 3)] = r_sum
return I_mat | def _get_I(self, a, b, size, plus_transpose=True) | Return I matrix in Chaput's PRL paper.
None is returned if I is zero matrix. | 3.929593 | 3.792192 | 1.036233 |
X = self._get_X(i_temp, weights, self._gv).ravel()
num_ir_grid_points = len(self._ir_grid_points)
num_band = self._primitive.get_number_of_atoms() * 3
size = num_ir_grid_points * num_band * 3
v = self._collision_matrix[i_sigma, i_temp].reshape(size, size)
solver = _select_solver(self._pinv_solver)
if solver in [1, 2, 4, 5]:
v = v.T
e = self._get_eigvals_pinv(i_sigma, i_temp)
t = self._temperatures[i_temp]
omega_inv = np.empty(v.shape, dtype='double', order='C')
np.dot(v, (e * v).T, out=omega_inv)
Y = np.dot(omega_inv, X)
self._set_f_vectors(Y, num_ir_grid_points, weights)
elems = ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))
for i, vxf in enumerate(elems):
mat = self._get_I(vxf[0], vxf[1], num_ir_grid_points * num_band)
self._mode_kappa[i_sigma, i_temp, :, :, i] = 0
if mat is not None:
np.dot(mat, omega_inv, out=mat)
# vals = (X ** 2 * np.diag(mat)).reshape(-1, 3).sum(axis=1)
# vals = vals.reshape(num_ir_grid_points, num_band)
# self._mode_kappa[i_sigma, i_temp, :, :, i] = vals
w = diagonalize_collision_matrix(mat,
pinv_solver=self._pinv_solver,
log_level=self._log_level)
if solver in [1, 2, 4, 5]:
mat = mat.T
spectra = np.dot(mat.T, X) ** 2 * w
for s, eigvec in zip(spectra, mat.T):
vals = s * (eigvec ** 2).reshape(-1, 3).sum(axis=1)
vals = vals.reshape(num_ir_grid_points, num_band)
self._mode_kappa[i_sigma, i_temp, :, :, i] += vals
factor = self._conversion_factor * Kb * t ** 2
self._mode_kappa[i_sigma, i_temp] *= factor | def _set_mode_kappa_Chaput(self, i_sigma, i_temp, weights) | Calculate mode kappa in the way described in Laurent Chaput's PRL paper.
This gives a different result from _set_mode_kappa and requires more
memory space. | 3.38564 | 3.37874 | 1.002042 |
positions = cell.get_scaled_positions()
lattice = cell.get_cell().T
# Least displacements of first atoms (Atom 1) are searched by
# using respective site symmetries of the original crystal.
# 'is_diagonal=False' below is made intentionally to expect
# better accuracy.
disps_first = get_least_displacements(symmetry,
is_plusminus=is_plusminus,
is_diagonal=False)
symprec = symmetry.get_symmetry_tolerance()
dds = []
for disp in disps_first:
atom1 = disp[0]
disp1 = disp[1:4]
site_sym = symmetry.get_site_symmetry(atom1)
dds_atom1 = {'number': atom1,
'direction': disp1,
'second_atoms': []}
# Reduced site symmetry at the first atom with respect to
# the displacement of the first atoms.
reduced_site_sym = get_reduced_site_symmetry(site_sym, disp1, symprec)
# Searching orbits (second atoms) with respect to
# the first atom and its reduced site symmetry.
second_atoms = get_least_orbits(atom1,
cell,
reduced_site_sym,
symprec)
for atom2 in second_atoms:
dds_atom2 = get_next_displacements(atom1,
atom2,
reduced_site_sym,
lattice,
positions,
symprec,
is_diagonal)
min_vec = get_equivalent_smallest_vectors(atom1,
atom2,
cell,
symprec)[0]
min_distance = np.linalg.norm(np.dot(lattice, min_vec))
dds_atom2['distance'] = min_distance
dds_atom1['second_atoms'].append(dds_atom2)
dds.append(dds_atom1)
return dds | def get_third_order_displacements(cell,
symmetry,
is_plusminus='auto',
is_diagonal=False) | Create displacement dataset
Note
----
Atoms 1, 2, and 3 are defined as follows:
Atom 1: The first displaced atom. Third order force constant
between Atoms 1, 2, and 3 is calculated.
Atom 2: The second displaced atom. Second order force constant
between Atoms 2 and 3 is calculated.
Atom 3: Force is measured on this atom.
Parameters
----------
cell : PhonopyAtoms
Supercell
symmetry : Symmetry
Symmetry of supercell
is_plusminus : str or bool, optional
Type of displacements, plus only (False), always plus and minus (True),
and plus and minus depending on site symmetry ('auto').
is_diagonal : bool, optional
Whether to allow diagonal displacements of Atom 2 or not
Returns
-------
dict
Data structure is like:
{'natom': 64,
'cutoff_distance': 4.000000,
'first_atoms':
[{'number': atom1,
'displacement': [0.03, 0., 0.],
'second_atoms': [ {'number': atom2,
'displacement': [0., -0.03, 0.],
'distance': 2.353},
{'number': ... }, ... ] },
{'number': atom1, ... } ]} | 3.679193 | 3.509857 | 1.048246 |
bond_sym = []
pos = positions
for rot in site_symmetry:
rot_pos = (np.dot(pos[atom_disp] - pos[atom_center], rot.T) +
pos[atom_center])
diff = pos[atom_disp] - rot_pos
diff -= np.rint(diff)
dist = np.linalg.norm(np.dot(lattice, diff))
if dist < symprec:
bond_sym.append(rot)
return np.array(bond_sym) | def get_bond_symmetry(site_symmetry,
lattice,
positions,
atom_center,
atom_disp,
symprec=1e-5) | Bond symmetry is the symmetry operations that keep the symmetry
of the cell containing two fixed atoms. | 2.851395 | 2.90362 | 0.982014 |
orbits = _get_orbits(atom_index, cell, site_symmetry, symprec)
mapping = np.arange(cell.get_number_of_atoms())
for i, orb in enumerate(orbits):
for num in np.unique(orb):
if mapping[num] > mapping[i]:
mapping[num] = mapping[i]
return np.unique(mapping) | def get_least_orbits(atom_index, cell, site_symmetry, symprec=1e-5) | Find least orbits for a centering atom | 2.87234 | 2.871451 | 1.000309 |
with h5py.File(filename, 'w') as w:
w.create_dataset('fc3', data=fc3, compression=compression)
if p2s_map is not None:
w.create_dataset('p2s_map', data=p2s_map) | def write_fc3_to_hdf5(fc3,
filename='fc3.hdf5',
p2s_map=None,
compression=None) | Write third-order force constants in hdf5 format.
Parameters
----------
force_constants : ndarray
Force constants
shape=(n_satom, n_satom, n_satom, 3, 3, 3) or
(n_patom, n_satom, n_satom,3,3,3), dtype=double
filename : str
Filename to be used.
p2s_map : ndarray, optional
Primitive atom indices in supercell index system
shape=(n_patom,), dtype=intc
compression : str or int, optional
h5py's lossless compression filters (e.g., "gzip", "lzf").
See the detail at docstring of h5py.Group.create_dataset. Default is
None. | 1.707276 | 1.906303 | 0.895596 |
suffix = _get_filename_suffix(mesh,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
hdf5_filename = "unitary" + suffix + ".hdf5"
with h5py.File(hdf5_filename, 'w') as w:
w.create_dataset('temperature', data=temperature)
if unitary_matrix is not None:
w.create_dataset('unitary_matrix', data=unitary_matrix)
if solver is not None:
w.create_dataset('solver', data=solver)
if verbose:
if len(temperature) > 1:
text = "Unitary matrices "
else:
text = "Unitary matrix "
if sigma is not None:
text += "at sigma %s " % _del_zeros(sigma)
if sigma_cutoff is not None:
text += "(%4.2f SD) " % sigma_cutoff
if len(temperature) > 1:
text += "were written into "
else:
text += "was written into "
if sigma is not None:
text += "\n"
text += "\"%s\"." % hdf5_filename
print(text) | def write_unitary_matrix_to_hdf5(temperature,
mesh,
unitary_matrix=None,
sigma=None,
sigma_cutoff=None,
solver=None,
filename=None,
verbose=False) | Write eigenvectors of collision matrices at temperatures.
Depending on the choice of solver, eigenvectors are stored
either column-wise or row-wise. | 2.402239 | 2.422876 | 0.991482 |
if self._interaction is None:
self.set_phph_interaction()
if epsilons is None:
_epsilons = [0.1]
else:
_epsilons = epsilons
self._grid_points = grid_points
get_frequency_shift(self._interaction,
self._grid_points,
self._band_indices,
_epsilons,
temperatures,
output_filename=output_filename,
log_level=self._log_level) | def get_frequency_shift(
self,
grid_points,
temperatures=np.arange(0, 1001, 10, dtype='double'),
epsilons=None,
output_filename=None) | Frequency shift from lowest order diagram is calculated.
Args:
epsilons (list of float):
The value used to avoid divergence. When multiple values are given,
frequency shifts for those values are returned. | 3.012777 | 3.574493 | 0.842854 |
self._primitive = self._get_primitive_cell(
self._supercell, self._supercell_matrix, self._primitive_matrix) | def _build_primitive_cell(self) | primitive_matrix:
Relative axes of primitive cell to the input unit cell.
Relative axes to the supercell is calculated by:
supercell_matrix^-1 * primitive_matrix
Therefore primitive cell lattice is finally calculated by:
(supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T | 5.242918 | 3.263283 | 1.606639 |
if self._phonon_supercell_matrix is None:
self._phonon_supercell = self._supercell
else:
self._phonon_supercell = get_supercell(
self._unitcell, self._phonon_supercell_matrix, self._symprec) | def _build_phonon_supercell(self) | phonon_supercell:
This supercell is used for harmonic phonons (frequencies,
eigenvectors, group velocities, ...)
phonon_supercell_matrix:
Different supercell size can be specified. | 2.531404 | 2.560153 | 0.98877 |
map_triplets, map_q, grid_address = _get_triplets_reciprocal_mesh_at_q(
grid_point,
mesh,
point_group,
is_time_reversal=is_time_reversal,
swappable=swappable)
bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,
mesh,
reciprocal_lattice,
is_dense=True)
triplets_at_q, weights = _get_BZ_triplets_at_q(
grid_point,
bz_grid_address,
bz_map,
map_triplets,
mesh)
assert np.prod(mesh) == weights.sum(), \
"Num grid points %d, sum of weight %d" % (
np.prod(mesh), weights.sum())
# These maps are required for collision matrix calculation.
if not stores_triplets_map:
map_triplets = None
map_q = None
return triplets_at_q, weights, bz_grid_address, bz_map, map_triplets, map_q | def get_triplets_at_q(grid_point,
mesh,
point_group, # real space point group of space group
reciprocal_lattice, # column vectors
is_time_reversal=True,
swappable=True,
stores_triplets_map=False) | Parameters
----------
grid_point : int
A grid point
mesh : array_like
Mesh numbers
dtype='intc'
shape=(3,)
point_group : array_like
Rotation matrices in real space. Note that those in reciprocal space
mean these matrices transposed (local terminology).
dtype='intc'
shape=(n_rot, 3, 3)
reciprocal_lattice : array_like
Reciprocal primitive basis vectors given as column vectors
dtype='double'
shape=(3, 3)
is_time_reversal : bool
Inversion symmetry is added if it doesn't exist.
swappable : bool
q1 and q2 can be swapped. By this, the number of triplets decreases.
Returns
-------
triplets_at_q : ndarray
Symmetry reduced number of triplets are stored as grid point
integer numbers.
dtype='uintp'
shape=(n_triplets, 3)
weights : ndarray
Weights of triplets in Brillouin zone
dtype='intc'
shape=(n_triplets,)
bz_grid_address : ndarray
Integer grid address of the points in Brillouin zone including
surface. The first prod(mesh) numbers of points are
independent. But the rest of points are
translational-symmetrically equivalent to some other points.
dtype='intc'
shape=(n_grid_points, 3)
bz_map : ndarray
Grid point mapping table containing BZ surface. See more
detail in spglib docstring.
dtype='uintp'
shape=(prod(mesh*2),)
map_triplets : ndarray or None
Returns when stores_triplets_map=True, otherwise None is
returned. Mapping table of all triplets to symmetrically
independent triplets. More precisely, this gives a list of
index mapping from all q-points to independent q' of
q+q'+q''=G. Considering q' is enough because q is fixed and
q''=G-q-q' where G is automatically determined to choose
smallest |G|.
dtype='uintp'
shape=(prod(mesh),)
map_q : ndarray or None
Returns when stores_triplets_map=True, otherwise None is
returned. Irreducible q-points stabilized by q-point of
specified grid_point.
dtype='uintp'
shape=(prod(mesh),) | 3.315285 | 2.766486 | 1.198374 |
import phono3py._phono3py as phono3c
map_triplets = np.zeros(np.prod(mesh), dtype='uintp')
map_q = np.zeros(np.prod(mesh), dtype='uintp')
grid_address = np.zeros((np.prod(mesh), 3), dtype='intc')
phono3c.triplets_reciprocal_mesh_at_q(
map_triplets,
map_q,
grid_address,
fixed_grid_number,
np.array(mesh, dtype='intc'),
is_time_reversal * 1,
np.array(rotations, dtype='intc', order='C'),
swappable * 1)
return map_triplets, map_q, grid_address | def _get_triplets_reciprocal_mesh_at_q(fixed_grid_number,
mesh,
rotations,
is_time_reversal=True,
swappable=True) | Search symmetry reduced triplets fixing one q-point
Triplets of (q0, q1, q2) are searched.
Parameters
----------
fixed_grid_number : int
Grid point of q0
mesh : array_like
Mesh numbers
dtype='intc'
shape=(3,)
rotations : array_like
Rotation matrices in real space. Note that those in reciprocal space
mean these matrices transposed (local terminology).
dtype='intc'
shape=(n_rot, 3, 3)
is_time_reversal : bool
Inversion symmetry is added if it doesn't exist.
swappable : bool
q1 and q2 can be swapped. By this, the number of triplets decreases. | 2.335346 | 2.341909 | 0.997198 |
# v[triplet, band0, band, band]
v = self._interaction_strength
w = self._weights_at_q
v_sum = np.dot(w, v.sum(axis=2).sum(axis=2))
return v_sum / np.prod(v.shape[2:]) | def get_averaged_interaction(self) | Return sum over phonon triplets of interaction strength
See Eq.(21) of PRB 91, 094306 (2015) | 8.449289 | 6.929925 | 1.219247 |
from alm import ALM
with ALM(lattice, positions, numbers) as alm:
natom = len(numbers)
alm.set_verbosity(log_level)
nkd = len(np.unique(numbers))
if 'cutoff_distance' not in alm_options:
rcs = -np.ones((2, nkd, nkd), dtype='double')
elif type(alm_options['cutoff_distance']) is float:
rcs = np.ones((2, nkd, nkd), dtype='double')
rcs[0] *= -1
rcs[1] *= alm_options['cutoff_distance']
alm.define(2, rcs)
alm.set_displacement_and_force(displacements, forces)
if 'solver' in alm_options:
solver = alm_options['solver']
else:
solver = 'SimplicialLDLT'
info = alm.optimize(solver=solver)
fc2 = extract_fc2_from_alm(alm,
natom,
atom_list=p2s_map,
p2s_map=p2s_map,
p2p_map=p2p_map)
fc3 = _extract_fc3_from_alm(alm,
natom,
p2s_map=p2s_map,
p2p_map=p2p_map)
return fc2, fc3 | def optimize(lattice,
positions,
numbers,
displacements,
forces,
alm_options=None,
p2s_map=None,
p2p_map=None,
log_level=0) | Calculate force constants
lattice : array_like
Basis vectors. a, b, c are given as column vectors.
shape=(3, 3), dtype='double'
positions : array_like
Fractional coordinates of atomic points.
shape=(num_atoms, 3), dtype='double'
numbers : array_like
Atomic numbers.
shape=(num_atoms,), dtype='intc'
displacements : array_like
Atomic displacement patterns in supercells in Cartesian.
dtype='double', shape=(supercells, num_atoms, 3)
forces : array_like
Forces in supercells.
dtype='double', shape=(supercells, num_atoms, 3)
alm_options : dict, optional
Default is None.
List of keys
cutoff_distance : float
solver : str
Either 'SimplicialLDLT' or 'dense'. Default is
'SimplicialLDLT'. | 2.796423 | 2.467979 | 1.133082 |
natom = disp_dataset['natom']
ndisp = len(disp_dataset['first_atoms'])
for disp1 in disp_dataset['first_atoms']:
ndisp += len(disp1['second_atoms'])
disp = np.zeros((ndisp, natom, 3), dtype='double', order='C')
indices = []
count = 0
for disp1 in disp_dataset['first_atoms']:
indices.append(count)
disp[count, disp1['number']] = disp1['displacement']
count += 1
for disp1 in disp_dataset['first_atoms']:
for disp2 in disp1['second_atoms']:
if 'included' in disp2:
if disp2['included']:
indices.append(count)
else:
indices.append(count)
disp[count, disp1['number']] = disp1['displacement']
disp[count, disp2['number']] = disp2['displacement']
count += 1
return disp, indices | def _get_alm_disp_fc3(disp_dataset) | Create displacements of atoms for ALM input
Note
----
Displacements of all atoms in supercells for all displacement
configurations in phono3py are returned, i.e., most of
displacements are zero. Only the configurations with 'included' ==
True are included in the list of indices that is returned, too.
Parameters
----------
disp_dataset : dict
Displacement dataset that may be obtained by
file_IO.parse_disp_fc3_yaml.
Returns
-------
disp : ndarray
Displacements of atoms in supercells of all displacement
configurations.
shape=(ndisp, natom, 3)
dtype='double'
indices : list of int
The indices of the displacement configurations with 'included' == True. | 2.181884 | 1.949246 | 1.119347 |
n_satom = fc3.shape[1]
for i_target in target_atoms:
for i_done in first_disp_atoms:
rot_indices = np.where(permutations[:, i_target] == i_done)[0]
if len(rot_indices) > 0:
atom_mapping = np.array(permutations[rot_indices[0]],
dtype='intc')
rot = rotations[rot_indices[0]]
rot_cart_inv = np.array(
similarity_transformation(lattice, rot).T,
dtype='double', order='C')
break
if len(rot_indices) == 0:
print("Position or symmetry may be wrong.")
raise RuntimeError
if verbose > 2:
print(" [ %d, x, x ] to [ %d, x, x ]" %
(i_done + 1, i_target + 1))
sys.stdout.flush()
try:
import phono3py._phono3py as phono3c
phono3c.distribute_fc3(fc3,
int(s2compact[i_target]),
int(s2compact[i_done]),
atom_mapping,
rot_cart_inv)
except ImportError:
print("Phono3py C-routine is not compiled correctly.")
for j in range(n_satom):
j_rot = atom_mapping[j]
for k in range(n_satom):
k_rot = atom_mapping[k]
fc3[i_target, j, k] = third_rank_tensor_rotation(
rot_cart_inv, fc3[i_done, j_rot, k_rot]) | def distribute_fc3(fc3,
first_disp_atoms,
target_atoms,
lattice,
rotations,
permutations,
s2compact,
verbose=False) | Distribute fc3
fc3[i, :, :, 0:3, 0:3, 0:3] where i=indices done are distributed to
symmetrically equivalent fc3 elements by tensor rotations.
Search symmetry operation (R, t) that performs
i_target -> i_done
and
atom_mapping[i_target] = i_done
fc3[i_target, j_target, k_target] = R_inv[i_done, j, k]
Parameters
----------
target_atoms: list or ndarray
Supercell atom indices to which fc3 are distributed.
s2compact: ndarray
Maps supercell index to compact index. For full-fc3,
s2compact=np.arange(n_satom).
shape=(n_satom,)
dtype=intc | 3.277839 | 2.952046 | 1.110362 |
lattice = supercell.get_cell().T
positions = supercell.get_scaled_positions()
num_atom = supercell.get_number_of_atoms()
fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
atom_list = np.unique([x['number'] for x in dataset_second_atoms])
for atom2 in atom_list:
disps2 = []
sets_of_forces = []
for disps_second in dataset_second_atoms:
if atom2 != disps_second['number']:
continue
bond_sym = get_bond_symmetry(
reduced_site_sym,
lattice,
positions,
atom1,
atom2,
symprec)
disps2.append(disps_second['displacement'])
sets_of_forces.append(disps_second['delta_forces'])
solve_force_constants(fc2,
atom2,
disps2,
sets_of_forces,
supercell,
bond_sym,
symprec)
# Shift positions according to set atom1 is at origin
pos_center = positions[atom1].copy()
positions -= pos_center
rotations = np.array(reduced_site_sym, dtype='intc', order='C')
translations = np.zeros((len(reduced_site_sym), 3),
dtype='double', order='C')
permutations = compute_all_sg_permutations(positions,
rotations,
translations,
lattice,
symprec)
distribute_force_constants(fc2,
atom_list,
lattice,
rotations,
permutations)
return fc2 | def get_constrained_fc2(supercell,
dataset_second_atoms,
atom1,
reduced_site_sym,
symprec) | dataset_second_atoms: [{'number': 7,
'displacement': [],
'delta_forces': []}, ...] | 3.332068 | 2.997408 | 1.11165 |
for keyword in node.keywords:
if keyword.arg == 'name' and '-' in keyword.value.s:
return DJ04(
lineno=node.lineno,
col=node.col_offset,
) | def capture_dash_in_url_name(self, node) | Capture dash in URL name | 6.369946 | 5.684763 | 1.12053 |
for arg in node.args:
if not(isinstance(arg, ast.Call) and isinstance(arg.func, ast.Name)):
continue
if arg.func.id != 'include':
continue
for keyword in arg.keywords:
if keyword.arg == 'namespace':
return
return DJ05(
lineno=node.lineno,
col=node.col_offset,
) | def capture_url_missing_namespace(self, node) | Capture missing namespace in url include. | 3.550885 | 3.209871 | 1.106239 |
if self.get_call_name(node) != 'render':
return
issues = []
for arg in node.args:
if isinstance(arg, ast.Call) and arg.func.id == 'locals':
issues.append(
DJ03(
lineno=node.lineno,
col=node.col_offset,
)
)
return issues | def run(self, node) | Captures the use of locals() in render function. | 4.440694 | 3.660784 | 1.213044 |
if isinstance(node.func, ast.Attribute):
return node.func.attr
elif isinstance(node.func, ast.Name):
return node.func.id | def get_call_name(self, node) | Return call name for the given node. | 2.118142 | 1.960785 | 1.080252 |
message = self.description.format(**self.parameters)
return '{code} {message}'.format(code=self.code, message=message) | def message(self) | Return issue message. | 5.508441 | 4.759131 | 1.157447 |
if not isinstance(element.value, (ast.Str, ast.Bytes)):
return False
node_value = element.value.s
if isinstance(node_value, bytes):
node_value = node_value.decode()
return node_value == '__all__' | def is_string_dunder_all(self, element) | Return True if element is ast.Str or ast.Bytes and equals "__all__" | 2.689075 | 2.235655 | 1.202813 |
if not self.checker_applies(node):
return
issues = []
for body in node.body:
if not isinstance(body, ast.ClassDef):
continue
for element in body.body:
if not isinstance(element, ast.Assign):
continue
for target in element.targets:
if target.id == 'fields' and self.is_string_dunder_all(element):
issues.append(
DJ07(
lineno=node.lineno,
col=node.col_offset,
)
)
elif target.id == 'exclude':
issues.append(
DJ06(
lineno=node.lineno,
col=node.col_offset,
)
)
return issues | def run(self, node) | Captures the use of exclude in ModelForm Meta | 3.191218 | 3.03149 | 1.05269 |
return (
isinstance(base, ast.Name) and
base.id == self.model_name_lookup
) | def is_model_name_lookup(self, base) | Return True if class is defined as the respective model name lookup declaration | 4.240948 | 3.390655 | 1.250776 |
return (
isinstance(base, ast.Attribute) and
isinstance(base.value, ast.Name) and
base.value.id == 'models' and base.attr == self.model_name_lookup
) | def is_models_name_lookup_attribute(self, base) | Return True if class is defined as the respective model name lookup declaration | 2.669595 | 2.4573 | 1.086393 |
# Scheme: stream
if hasattr(source, 'read'):
return ('stream', None)
# Format: inline
if not isinstance(source, six.string_types):
return (None, 'inline')
# Format: gsheet
if 'docs.google.com/spreadsheets' in source:
if 'export' not in source and 'pub' not in source:
return (None, 'gsheet')
elif 'csv' in source:
return ('https', 'csv')
# Format: sql
for sql_scheme in config.SQL_SCHEMES:
if source.startswith('%s://' % sql_scheme):
return (None, 'sql')
# General
parsed = urlparse(source)
scheme = parsed.scheme.lower()
if len(scheme) < 2:
scheme = config.DEFAULT_SCHEME
format = os.path.splitext(parsed.path or parsed.netloc)[1][1:].lower() or None
if format is None:
# Test if query string contains a "format=" parameter.
query_string = parse_qs(parsed.query)
query_string_format = query_string.get("format")
if query_string_format is not None and len(query_string_format) == 1:
format = query_string_format[0]
# Format: datapackage
if parsed.path.endswith('datapackage.json'):
return (None, 'datapackage')
return (scheme, format) | def detect_scheme_and_format(source) | Detect scheme and format based on source and return as a tuple.
Scheme is at least 2 letters before `://` (it will be lower-cased).
For example `http` from `http://example.com/table.csv` | 3.010142 | 2.887863 | 1.042342 |
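A hedged usage sketch that mirrors the examples in the docstring above; the expected outputs are my reading of the function in this row, and the default scheme for plain paths is whatever config.DEFAULT_SCHEME is set to (typically 'file').
# Hedged sketch; assumes the function is exposed via the tabulator.helpers module.
from tabulator import helpers
print(helpers.detect_scheme_and_format('http://example.com/table.csv'))   # ('http', 'csv')
print(helpers.detect_scheme_and_format('table.xlsx'))                     # (config.DEFAULT_SCHEME, 'xlsx')
print(helpers.detect_scheme_and_format([['id', 'name'], [1, 'english']])) # (None, 'inline')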
# To reduce tabulator import time
from cchardet import detect
if encoding is not None:
return normalize_encoding(sample, encoding)
result = detect(sample)
confidence = result['confidence'] or 0
encoding = result['encoding'] or 'ascii'
encoding = normalize_encoding(sample, encoding)
if confidence < config.ENCODING_CONFIDENCE:
encoding = config.DEFAULT_ENCODING
if encoding == 'ascii':
encoding = config.DEFAULT_ENCODING
return encoding | def detect_encoding(sample, encoding=None) | Detect encoding of a byte string sample. | 4.075966 | 4.280178 | 0.952289 |
encoding = codecs.lookup(encoding).name
# Work around 'Incorrect detection of utf-8-sig encoding'
# <https://github.com/PyYoshi/cChardet/issues/28>
if encoding == 'utf-8':
if sample.startswith(codecs.BOM_UTF8):
encoding = 'utf-8-sig'
# Use the BOM stripping name (without byte-order) for UTF-16 encodings
elif encoding == 'utf-16-be':
if sample.startswith(codecs.BOM_UTF16_BE):
encoding = 'utf-16'
elif encoding == 'utf-16-le':
if sample.startswith(codecs.BOM_UTF16_LE):
encoding = 'utf-16'
return encoding | def normalize_encoding(sample, encoding) | Normalize encoding including 'utf-8-sig', 'utf-16-be', utf-16-le tweaks. | 3.194552 | 2.909344 | 1.098032 |
pattern = re.compile('\\s*<(!doctype|html)', re.IGNORECASE)
return bool(pattern.match(text)) | def detect_html(text) | Detect if text is HTML. | 5.541602 | 5.003158 | 1.107621 |
try:
position = stream.tell()
except Exception:
position = True
if position != 0:
try:
stream.seek(0)
except Exception:
message = 'It\'s not possible to reset this stream'
raise exceptions.TabulatorException(message) | def reset_stream(stream) | Reset stream pointer to the first element.
If stream is not seekable raise Exception. | 4.780504 | 4.717028 | 1.013457 |
# To reduce tabulator import time
import requests.utils
if six.PY2:
def url_encode_non_ascii(bytes):
pattern = '[\x80-\xFF]'
replace = lambda c: ('%%%02x' % ord(c.group(0))).upper()
return re.sub(pattern, replace, bytes)
parts = urlparse(uri)
uri = urlunparse(
part.encode('idna') if index == 1
else url_encode_non_ascii(part.encode('utf-8'))
for index, part in enumerate(parts))
return requests.utils.requote_uri(uri) | def requote_uri(uri) | Requote uri if it contains non-ascii chars, spaces etc. | 4.200934 | 4.119642 | 1.019733 |
module_name, attribute_name = path.rsplit('.', 1)
module = import_module(module_name)
attribute = getattr(module, attribute_name)
return attribute | def import_attribute(path) | Import attribute by path like `package.module.attribute` | 1.777422 | 1.861581 | 0.954792 |
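A self-contained sketch of import_attribute: the dotted path is split into a module part and an attribute part, the module is imported, and the attribute is looked up on it.
from importlib import import_module
def import_attribute(path):
    module_name, attribute_name = path.rsplit('.', 1)
    return getattr(import_module(module_name), attribute_name)
join = import_attribute('os.path.join')  # resolves the 'os.path' module, then grabs 'join'
print(join('data', 'table.csv'))          # data/table.csv on POSIX systems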
result = {}
for name, value in copy(options).items():
if name in names:
result[name] = value
del options[name]
return result | def extract_options(options, names) | Return options for names and remove them from the given options in place. | 2.910776 | 2.371884 | 1.2272 |
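A self-contained sketch of extract_options showing its in-place side effect: the picked keys are returned in a new dict and deleted from the original one; the option names used are illustrative.
from copy import copy
def extract_options(options, names):
    result = {}
    for name, value in copy(options).items():
        if name in names:
            result[name] = value
            del options[name]
    return result
options = {'delimiter': ';', 'sheet': 2, 'http_timeout': 10}  # illustrative option names
csv_options = extract_options(options, ['delimiter'])
print(csv_options)  # {'delimiter': ';'}
print(options)      # {'sheet': 2, 'http_timeout': 10}  -- extracted key removed in place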
if value is None:
return u''
isoformat = getattr(value, 'isoformat', None)
if isoformat is not None:
value = isoformat()
return type(u'')(value) | def stringify_value(value) | Convert any value to string. | 4.275932 | 4.068 | 1.051114 |
'''Opens the stream for reading.'''
options = copy(self.__options)
# Get scheme and format if not already given
compression = None
if self.__scheme is None or self.__format is None:
detected_scheme, detected_format = helpers.detect_scheme_and_format(self.__source)
scheme = self.__scheme or detected_scheme
format = self.__format or detected_format
# Get compression
for type in config.SUPPORTED_COMPRESSION:
if self.__compression == type or detected_format == type:
compression = type
else:
scheme = self.__scheme
format = self.__format
# Initiate loader
self.__loader = None
if scheme is not None:
loader_class = self.__custom_loaders.get(scheme)
if loader_class is None:
if scheme not in config.LOADERS:
message = 'Scheme "%s" is not supported' % scheme
raise exceptions.SchemeError(message)
loader_path = config.LOADERS[scheme]
if loader_path:
loader_class = helpers.import_attribute(loader_path)
if loader_class is not None:
loader_options = helpers.extract_options(options, loader_class.options)
if compression and 'http_stream' in loader_class.options:
loader_options['http_stream'] = False
self.__loader = loader_class(
bytes_sample_size=self.__bytes_sample_size,
**loader_options)
# Zip compression
if compression == 'zip' and six.PY3:
source = self.__loader.load(self.__source, mode='b')
with zipfile.ZipFile(source) as archive:
name = archive.namelist()[0]
if 'filename' in options.keys():
name = options['filename']
del options['filename']
with archive.open(name) as file:
source = tempfile.NamedTemporaryFile(suffix='.' + name)
for line in file:
source.write(line)
source.seek(0)
self.__source = source
self.__loader = StreamLoader(bytes_sample_size=self.__bytes_sample_size)
format = self.__format or helpers.detect_scheme_and_format(source.name)[1]
scheme = 'stream'
# Gzip compression
elif compression == 'gz' and six.PY3:
name = self.__source.replace('.gz', '')
self.__source = gzip.open(self.__loader.load(self.__source, mode='b'))
self.__loader = StreamLoader(bytes_sample_size=self.__bytes_sample_size)
format = self.__format or helpers.detect_scheme_and_format(name)[1]
scheme = 'stream'
# Not supported compression
elif compression:
message = 'Compression "%s" is not supported for your Python version'
raise exceptions.TabulatorException(message % compression)
# Initiate parser
parser_class = self.__custom_parsers.get(format)
if parser_class is None:
if format not in config.PARSERS:
message = 'Format "%s" is not supported' % format
raise exceptions.FormatError(message)
parser_class = helpers.import_attribute(config.PARSERS[format])
parser_options = helpers.extract_options(options, parser_class.options)
self.__parser = parser_class(self.__loader,
force_parse=self.__force_parse,
**parser_options)
# Bad options
if options:
message = 'Not supported option(s) "%s" for scheme "%s" and format "%s"'
message = message % (', '.join(options), scheme, format)
warnings.warn(message, UserWarning)
# Open and setup
self.__parser.open(self.__source, encoding=self.__encoding)
self.__extract_sample()
self.__extract_headers()
if not self.__allow_html:
self.__detect_html()
# Set scheme/format/encoding
self.__actual_scheme = scheme
self.__actual_format = format
self.__actual_encoding = self.__parser.encoding
return self | def open(self) | Opens the stream for reading. | 2.519401 | 2.50713 | 1.004894 |
'''Resets the stream pointer to the beginning of the file.'''
if self.__row_number > self.__sample_size:
self.__parser.reset()
self.__extract_sample()
self.__extract_headers()
self.__row_number = 0 | def reset(self) | Resets the stream pointer to the beginning of the file. | 7.149317 | 5.850908 | 1.221916 |
'''Returns the stream's rows used as sample.
These sample rows are used internally to infer characteristics of the
source file (e.g. encoding, headers, ...).
'''
sample = []
iterator = iter(self.__sample_extended_rows)
iterator = self.__apply_processors(iterator)
for row_number, headers, row in iterator:
sample.append(row)
return sample | def sample(self) | Returns the stream's rows used as sample.
These sample rows are used internally to infer characteristics of the
source file (e.g. encoding, headers, ...). | 12.296621 | 3.90969 | 3.145165 |
'''Iterate over the rows.
Each row is returned in a format that depends on the arguments `keyed`
and `extended`. By default, each row is returned as list of their
values.
Args:
keyed (bool, optional): When True, each returned row will be a
`dict` mapping the header name to its value in the current row.
For example, `[{'name': 'J Smith', 'value': '10'}]`. Ignored if
``extended`` is True. Defaults to False.
extended (bool, optional): When True, returns each row as a tuple
with row number (starts at 1), list of headers, and list of row
values. For example, `(1, ['name', 'value'], ['J Smith', '10'])`.
Defaults to False.
Returns:
Iterator[Union[List[Any], Dict[str, Any], Tuple[int, List[str], List[Any]]]]:
The row itself. The format depends on the values of `keyed` and
`extended` arguments.
Raises:
exceptions.TabulatorException: If the stream is closed.
'''
# Error if closed
if self.closed:
message = 'Stream is closed. Please call "stream.open()" first.'
raise exceptions.TabulatorException(message)
# Create iterator
iterator = chain(
self.__sample_extended_rows,
self.__parser.extended_rows)
iterator = self.__apply_processors(iterator)
# Yield rows from iterator
for row_number, headers, row in iterator:
if row_number > self.__row_number:
self.__row_number = row_number
if extended:
yield (row_number, headers, row)
elif keyed:
yield dict(zip(headers, row))
else:
yield row | def iter(self, keyed=False, extended=False) | Iterate over the rows.
Each row is returned in a format that depends on the arguments `keyed`
and `extended`. By default, each row is returned as list of their
values.
Args:
keyed (bool, optional): When True, each returned row will be a
`dict` mapping the header name to its value in the current row.
For example, `[{'name': 'J Smith', 'value': '10'}]`. Ignored if
``extended`` is True. Defaults to False.
extended (bool, optional): When True, returns each row as a tuple
with row number (starts at 1), list of headers, and list of row
values. For example, `(1, ['name', 'value'], ['J Smith', '10'])`.
Defaults to False.
Returns:
Iterator[Union[List[Any], Dict[str, Any], Tuple[int, List[str], List[Any]]]]:
The row itself. The format depends on the values of `keyed` and
`extended` arguments.
Raises:
exceptions.TabulatorException: If the stream is closed. | 3.563071 | 1.665404 | 2.139464 |
'''Save stream to the local filesystem.

Args:
    target (str): Path where to save the stream.
    format (str, optional): The format the stream will be saved as. If
        None, detects from the ``target`` path. Defaults to None.
    encoding (str, optional): Saved file encoding. Defaults to
        ``config.DEFAULT_ENCODING``.
    **options: Extra options passed to the writer.
'''
# Get encoding/format
if encoding is None:
    encoding = config.DEFAULT_ENCODING
if format is None:
    _, format = helpers.detect_scheme_and_format(target)
# Prepare writer class
writer_class = self.__custom_writers.get(format)
if writer_class is None:
    if format not in config.WRITERS:
        message = 'Format "%s" is not supported' % format
        raise exceptions.FormatError(message)
    writer_class = helpers.import_attribute(config.WRITERS[format])
# Prepare writer options
writer_options = helpers.extract_options(options, writer_class.options)
if options:
    message = 'Not supported options "%s" for format "%s"'
    message = message % (', '.join(options), format)
    raise exceptions.TabulatorException(message)
# Write data to target
writer = writer_class(**writer_options)
writer.write(self.iter(), target, headers=self.headers, encoding=encoding) | def save(self, target, format=None, encoding=None, **options) | Save stream to the local filesystem.
Args:
target (str): Path where to save the stream.
format (str, optional): The format the stream will be saved as. If
None, detects from the ``target`` path. Defaults to None.
encoding (str, optional): Saved file encoding. Defaults to
``config.DEFAULT_ENCODING``.
**options: Extra options passed to the writer. | 3.167773 | 2.254981 | 1.404789 |
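A short usage sketch for the method above, assuming a tabulator `Stream` opened on an illustrative `data.csv` and assuming a JSON writer is registered in `config.WRITERS`; the file names are placeholders.

from tabulator import Stream

# Re-save a CSV source as JSON; the output format is inferred from the target extension
with Stream('data.csv', headers=1) as stream:
    stream.save('data.json')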
'''Check if tabulator is able to load the source.

Args:
    source (Union[str, IO]): The source path or IO object.
    scheme (str, optional): The source scheme. Auto-detect by default.
    format (str, optional): The source file format. Auto-detect by default.

Returns:
    bool: Whether tabulator is able to load the source file.

Raises:
    `tabulator.exceptions.SchemeError`: The file scheme is not supported.
    `tabulator.exceptions.FormatError`: The file format is not supported.
'''
# Get scheme and format
detected_scheme, detected_format = helpers.detect_scheme_and_format(source)
scheme = scheme or detected_scheme
format = format or detected_format
# Validate scheme and format
if scheme is not None:
    if scheme not in config.LOADERS:
        raise exceptions.SchemeError('Scheme "%s" is not supported' % scheme)
if format not in config.PARSERS:
    raise exceptions.FormatError('Format "%s" is not supported' % format)
return True | def validate(source, scheme=None, format=None) | Check if tabulator is able to load the source.
Args:
source (Union[str, IO]): The source path or IO object.
scheme (str, optional): The source scheme. Auto-detect by default.
format (str, optional): The source file format. Auto-detect by default.
Returns:
bool: Whether tabulator is able to load the source file.
Raises:
`tabulator.exceptions.SchemeError`: The file scheme is not supported.
`tabulator.exceptions.FormatError`: The file format is not supported. | 2.5685 | 1.664657 | 1.54296 |
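A hedged example of how `validate` might be called; the file name is illustrative and the exception classes are the ones named in the docstring above.

from tabulator import validate, exceptions

try:
    validate('data.csv')   # returns True when both scheme and format are supported
except exceptions.SchemeError as error:
    print('scheme not supported:', error)
except exceptions.FormatError as error:
    print('format not supported:', error)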
@wraps(f)
def _wrapper(self, *args, **kwargs):
    if None in (self.axis, self.sel_axis):
        raise ValueError('%(func_name)s requires the node %(node)s '
                         'to have an axis and a sel_axis function' %
                         dict(func_name=f.__name__, node=repr(self)))
    return f(self, *args, **kwargs)
return _wrapper | def require_axis(f) | Check if the object of the function has axis and sel_axis members | 4.124597 | 3.456639 | 1.193239 |
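For context, a sketch of how such a decorator is typically applied to a node method that needs axis information; the `axis_dist` method body is an illustrative assumption, not taken from the rows below, and `Node`/`KDNode` are the classes whose methods appear in this section.

import math

class KDNode(Node):
    @require_axis
    def axis_dist(self, point, axis):
        # Squared distance along a single axis; the decorator guarantees the
        # node was created with an axis and a sel_axis function
        return math.pow(self.data[axis] - point[axis], 2)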
if not point_list and not dimensions:
    raise ValueError('either point_list or dimensions must be provided')
elif point_list:
    dimensions = check_dimensionality(point_list, dimensions)
# by default cycle through the axis
sel_axis = sel_axis or (lambda prev_axis: (prev_axis+1) % dimensions)
if not point_list:
    return KDNode(sel_axis=sel_axis, axis=axis, dimensions=dimensions)
# Sort point list and choose median as pivot element
point_list = list(point_list)
point_list.sort(key=lambda point: point[axis])
median = len(point_list) // 2
loc = point_list[median]
left = create(point_list[:median], dimensions, sel_axis(axis))
right = create(point_list[median + 1:], dimensions, sel_axis(axis))
return KDNode(loc, left, right, axis=axis, sel_axis=sel_axis, dimensions=dimensions) | def create(point_list=None, dimensions=None, axis=0, sel_axis=None) | Creates a kd-tree from a list of points
All points in the list must be of the same dimensionality.
If no point_list is given, an empty tree is created. The number of
dimensions has to be given instead.
If both a point_list and dimensions are given, the numbers must agree.
Axis is the axis on which the root-node should split.
sel_axis(axis) is used when creating subnodes of a node. It receives the
axis of the parent node and returns the axis of the child node. | 2.994406 | 2.807574 | 1.066545 |
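A small usage sketch for tree construction, assuming the module is importable as `kdtree` (as in the upstream python-kdtree package); the points are illustrative.

import kdtree

# Build a 2-dimensional tree; the root holds the median point along axis 0
tree = kdtree.create([(2, 3), (5, 4), (9, 6), (4, 7), (8, 1), (7, 2)])
kdtree.visualize(tree)   # prints an ASCII rendering of the levels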
q = deque()
q.append(tree)
while q:
    node = q.popleft()
    yield node
    if include_all or node.left:
        q.append(node.left or node.__class__())
    if include_all or node.right:
        q.append(node.right or node.__class__()) | def level_order(tree, include_all=False) | Returns an iterator over the tree in level-order
If include_all is set to True, empty parts of the tree are filled
with dummy entries and the iterator becomes infinite. | 2.179521 | 2.173403 | 1.002815 |
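Continuing the construction sketch above, the level-order iterator can be used to collect the stored points breadth-first; `tree` is the illustrative tree built with `kdtree.create`.

# Root first, then its children level by level
points = [node.data for node in kdtree.level_order(tree)]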
height = min(max_level, tree.height()-1)
max_width = pow(2, height)
per_level = 1
in_level = 0
level = 0
for node in level_order(tree, include_all=True):
    if in_level == 0:
        print()
        print()
        print(' '*left_padding, end=' ')
    width = int(max_width*node_width/per_level)
    node_str = (str(node.data) if node else '').center(width)
    print(node_str, end=' ')
    in_level += 1
    if in_level == per_level:
        in_level = 0
        per_level *= 2
        level += 1
    if level > height:
        break
print()
print() | def visualize(tree, max_level=100, node_width=10, left_padding=5) | Prints the tree to stdout | 3.389706 | 3.351714 | 1.011335 |
return (not self.data) or \
       (all(not bool(c) for c, p in self.children)) | def is_leaf(self) | Returns True if a Node has no subnodes
>>> Node().is_leaf
True
>>> Node( 1, left=Node(2) ).is_leaf
False | 13.553359 | 18.794668 | 0.721128 |
if not self:
    return
yield self
if self.left:
    for x in self.left.preorder():
        yield x
if self.right:
    for x in self.right.preorder():
        yield x | def preorder(self) | iterator for nodes: root, left, right | 2.477237 | 2.057212 | 1.204172 |
if not self:
    return
if self.left:
    for x in self.left.inorder():
        yield x
yield self
if self.right:
    for x in self.right.inorder():
        yield x | def inorder(self) | iterator for nodes: left, root, right | 2.557256 | 2.048384 | 1.248426 |
if not self:
    return
if self.left:
    for x in self.left.postorder():
        yield x
if self.right:
    for x in self.right.postorder():
        yield x
yield self | def postorder(self) | iterator for nodes: left, right, root | 2.51739 | 1.989362 | 1.265426 |
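The three traversal generators above can be called on any node; a brief sketch on the illustrative `tree` built earlier.

# Depth-first traversals starting at the root node
roots_first = [node.data for node in tree.preorder()]
roots_last = [node.data for node in tree.postorder()]
in_between = [node.data for node in tree.inorder()]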
if self.left and self.left.data is not None:
    yield self.left, 0
if self.right and self.right.data is not None:
    yield self.right, 1 | def children(self) | Returns an iterator for the non-empty children of the Node
The children are returned as (Node, pos) tuples where pos is 0 for the
left subnode and 1 for the right.
>>> len(list(create(dimensions=2).children))
0
>>> len(list(create([ (1, 2) ]).children))
0
>>> len(list(create([ (2, 2), (2, 1), (2, 3) ]).children))
2 | 3.303949 | 2.65757 | 1.243222 |
if index == 0:
    self.left = child
else:
    self.right = child | def set_child(self, index, child) | Sets one of the node's children
index 0 refers to the left, 1 to the right child | 4.114037 | 2.703019 | 1.522015 |
min_height = int(bool(self))
return max([min_height] + [c.height()+1 for c, p in self.children]) | def height(self) | Returns height of the (sub)tree, without considering
empty leaf-nodes
>>> create(dimensions=2).height()
0
>>> create([ (1, 2) ]).height()
1
>>> create([ (1, 2), (2, 3) ]).height()
2 | 14.298589 | 11.933444 | 1.198195 |
for c, pos in self.children:
    if child == c:
        return pos | def get_child_pos(self, child) | Returns the position of the given child
If the given node is the left child, 0 is returned. If it's the right
child, 1 is returned. Otherwise None is returned | 6.895849 | 6.948585 | 0.992411 |
current = self
while True:
    check_dimensionality([point], dimensions=current.dimensions)
    # Adding has hit an empty leaf-node, add here
    if current.data is None:
        current.data = point
        return current
    # split on self.axis, recurse either left or right
    if point[current.axis] < current.data[current.axis]:
        if current.left is None:
            current.left = current.create_subnode(point)
            return current.left
        else:
            current = current.left
    else:
        if current.right is None:
            current.right = current.create_subnode(point)
            return current.right
        else:
            current = current.right | def add(self, point) | Adds a point to the current node or iteratively
descends to one of its children.
Users should call add() only on the topmost tree. | 3.849731 | 3.659591 | 1.051956 |
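A usage sketch for `add`, assuming the illustrative `tree` built earlier; the new point is hypothetical.

# Insert a new point; add() descends from the node it is called on,
# so it should be invoked on the root of the tree
tree.add((6, 5))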
return self.__class__(data,
                      axis=self.sel_axis(self.axis),
                      sel_axis=self.sel_axis,
                      dimensions=self.dimensions) | def create_subnode(self, data) | Creates a subnode for the current node | 8.669241 | 8.679764 | 0.998788 |
if self.right:
    child, parent = self.right.extreme_child(min, self.axis)
else:
    child, parent = self.left.extreme_child(max, self.axis)
return (child, parent if parent is not None else self) | def find_replacement(self) | Finds a replacement for the current node
The replacement is returned as a
(replacement-node, replacements-parent-node) tuple | 5.216439 | 4.390949 | 1.187998 |
if not self.data == point:
    return False
return (node is None) or (node is self) | def should_remove(self, point, node) | checks if self's point (and maybe identity) matches | 9.007436 | 7.688783 | 1.171503 |
# Recursion has reached an empty leaf node, nothing here to delete
if not self:
    return
# Recursion has reached the node to be deleted
if self.should_remove(point, node):
    return self._remove(point)
# Remove direct subnode
if self.left and self.left.should_remove(point, node):
    self.left = self.left._remove(point)
elif self.right and self.right.should_remove(point, node):
    self.right = self.right._remove(point)
# Recurse to subtrees
if point[self.axis] <= self.data[self.axis]:
    if self.left:
        self.left = self.left.remove(point, node)
if point[self.axis] >= self.data[self.axis]:
    if self.right:
        self.right = self.right.remove(point, node)
return self | def remove(self, point, node=None) | Removes the node with the given point from the tree
Returns the new root node of the (sub)tree.
If there are multiple points matching "point", only one is removed. The
optional "node" parameter is used for checking the identity, once the
removal candidate is decided. | 2.6401 | 2.602503 | 1.014446 |
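A usage sketch for `remove`, again on the illustrative `tree`; because removal may replace the root, the returned node should be kept.

# Remove a point and rebind the root, since the root node may change
tree = tree.remove((8, 1))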