seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
498662479
|
#!/usr/bin/env python
# Copyright: (c) 2019, Tanner Dowhy <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: conda_install
short_description: Manage packages in a conda environment.
version_added: "2.7"
author: "Tanner Dowhy (@TannerDowhy)"
options:
  state:
    description: Whether the listed packages should be present, absent, or at the latest version.
    choices: [present, absent, latest]
    default: present
  name:
    description: List of package names to manage.
    required: true
  version:
    description: Package version to install.
  env_path:
    description: Path to the conda environment.
  environment:
    description: Name of the conda environment to operate on.
  channel:
    description: List of conda channels to search for packages.
  executable:
    description: Path to the conda executable to use.
notes:
  - Requires conda to be available on the managed host.
'''
EXAMPLES = '''
# Install packages into a conda environment
- name: Install packages with conda
  conda_install:
    name:
      - samtools
      - bwa
    environment: my_env
    state: present

# Remove a package from a conda environment
- name: Remove a package with conda
  conda_install:
    name:
      - samtools
    environment: my_env
    state: absent
'''
RETURN = '''
actions:
    description: The conda actions performed by the module.
    type: list
changed:
    description: Whether the module made any changes.
    type: bool
'''
from ansible.module_utils.basic import AnsibleModule
from biolighthouse.module_utils.Conda.conda import *
def conda_install_arg_spec(**kwargs):
spec = dict(
state=dict(type='str', choices=['present', 'absent', 'latest'], default='present'),
        name=dict(type='list', default=[], required=True),
version=dict(required=False),
env_path=dict(type='path', default=None, required=False),
environment=dict(type='str', default=None, required=False),
        channel=dict(type='list', default=[], required=False),
executable=dict(type='path', required=False)
)
spec.update(kwargs)
return spec
def run_module():
argument_spec=conda_install_arg_spec()
module = AnsibleModule(argument_spec,
supports_check_mode=True
)
result = dict(
changed=False,
actions = []
)
state = module.params['state']
name = module.params['name']
version = module.params['version']
env_path = module.params['env_path']
environment = module.params['environment']
channel = module.params['channel']
executable = module.params['executable']
conda = Conda(module, environment)
if environment:
env_exists = conda.check_env(environment)
    if not env_exists:
        result['msg'] = "%s environment does not exist." % environment
        module.fail_json(**result)
target_packages = [conda.split_name_version(n, version) for n in name]
installed_packages = conda.list_packages(environment)
if state == 'present':
absent_packages = conda.get_absent_packages(target_packages, installed_packages, check_version=True)
if absent_packages:
if not module.check_mode:
actions = conda.install_packages(absent_packages, channel)
result['actions'] += actions
result['changed'] = True
elif state == 'absent':
present_packages = conda.get_present_packages(
target_packages, installed_packages,
check_version=False)
if present_packages:
names = [p['name'] for p in present_packages]
if not module.check_mode:
actions = conda.remove_packages(names, channel)
result['actions'] += actions
result['changed'] = True
elif state == 'latest':
# Find missing packages first
absent_packages = conda.get_absent_packages(target_packages,
installed_packages,
check_version=False)
present_packages = conda.get_present_packages(target_packages,
installed_packages,
check_version=False)
if absent_packages:
if not module.check_mode:
actions = conda.install_packages(absent_packages, channel)
result['actions'] += actions
result['changed'] = True
if present_packages:
# Check what needs to be updated with a dry run
names = [p['name'] for p in present_packages]
dry_actions = conda.update_packages(names, channel, dry_run=True)
if dry_actions:
if not module.check_mode:
actions = conda.update_packages(names, channel)
result['actions'] += actions
result['changed'] = True
    # exit_json reports the result in both normal and check mode; in check
    # mode the result reflects what would have changed.
    module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
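# A minimal, self-contained sketch of the present/absent bookkeeping above,
# assuming Conda.split_name_version and Conda.get_absent_packages compare
# target specs to the installed list by name (and optionally version).
# These helpers are illustrative stand-ins, not the biolighthouse API.
def _split_name_version(spec, default_version=None):
    # "samtools=1.9" -> {'name': 'samtools', 'version': '1.9'}
    name, _, version = spec.partition('=')
    return {'name': name, 'version': version or default_version}

def _absent_packages(targets, installed, check_version=True):
    # Requested packages that are missing, or installed at another version.
    key = (lambda p: (p['name'], p['version'])) if check_version else (lambda p: p['name'])
    have = {key(p) for p in installed}
    return [t for t in targets if key(t) not in have]

# Example: with numpy 1.15 installed, requesting numpy=1.16 and scipy
# reports both as absent when versions are checked.
# _absent_packages([_split_name_version('numpy=1.16'), _split_name_version('scipy')],
#                  [{'name': 'numpy', 'version': '1.15'}])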
| null |
biolighthouse/modules/Conda/conda_install.py
|
conda_install.py
|
py
| 5,087 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ansible.module_utils.basic.AnsibleModule",
"line_number": 74,
"usage_type": "call"
}
] |
91127038
|
from symtype import KT, SymType
from scanner import Scanner
import stmts
from copy import copy
import traceback
import typez
import sys
import pygame
pygame.init()
window = pygame.display.set_mode((640, 480))
class Parser(object):
def __init__(self, program):
super(Parser, self).__init__()
self.scanner = Scanner(program)
self.symbol = ""
self.statementList = []
self.__nextSymbol()
self.relops = [SymType.eqop, SymType.ltop, SymType.leop, SymType.geop, SymType.gtop]
self.predefFigures = [SymType.circle, SymType.square, SymType.point, SymType.line]
self.colors = [SymType.black, SymType.notblack]
def __nextSymbol(self):
self.symbol = self.scanner.nextSymbol()
def __accept(self, atom):
if self.symbol == atom:
self.__nextSymbol()
return 1
else:
return 0
def __expect(self, atom):
if self.__accept(atom):
return 1
else:
self.__syntaxError(atom)
return 0
def __syntaxError(self, atom):
self.scanner.scanError(atom, "Spodziewany symbol: ", atom)
raise Exception
def __syntaxError1(self, atom):
self.scanner.scanError(atom, "Nieoczekiwany symbol: ", atom)
raise Exception
def program(self):
i = 0
while self.symbol != SymType.others:
self.statementList.extend([self.__statement()])
i = i + 1
if i > 1000:
raise Exception("Zbyt wiele instrukcji.")
    def __statement(self):
        # Try each statement parser in turn; the first one that recognizes
        # the input returns a statement node, otherwise it returns None.
        for parse in (self.__assignmentStmt, self.__incrementStmt,
                      self.__decrementStmt, self.__whileStmt,
                      self.__ifStmt, self.__drawStmt,
                      self.__moveStmt, self.__printStmt):
            r = parse()
            if r is not None:
                return r
        self.__syntaxError1(self.symbol)
def __assignmentStmt(self):
if self.symbol == SymType.ident :
varName = self.scanner.spell
self.__nextSymbol()
if self.__accept(SymType.becomes):
value = self.__number()
return stmts.AssignmentStmt(varName, value)
elif self.symbol == SymType.beginsy:
return self.__figureAssignmentStmt(varName)
else:
self.__syntaxError1(self.symbol)
else:
return None
def __figureAssignmentStmt(self, figureName):
statements = self.__block()
figure = typez.MyFigure(figureName, statements)
return stmts.FigureAssignmentStmt(figureName, figure)
def __incrementStmt(self):
if self.__accept(SymType.increment) :
if self.symbol == SymType.ident:
ident = self.scanner.spell
self.__nextSymbol()
else:
self.__syntaxError1(self.symbol)
return stmts.IncrementStmt(ident)
else:
return None
def __decrementStmt(self):
if self.__accept(SymType.decrement) :
if self.symbol == SymType.ident:
ident = self.scanner.spell
self.__nextSymbol()
else:
self.__syntaxError1(self.symbol)
return stmts.DecrementStmt(ident)
else:
return None
def __whileStmt(self):
if self.__accept(SymType.whilesy) :
condition = self.__condition()
statements = self.__block()
return stmts.WhileStmt(condition, statements)
else:
return None
def __ifStmt(self):
if self.__accept(SymType.ifsy) :
condition = self.__condition()
statements = self.__block()
return stmts.IfStmt(condition, statements)
else:
return None
def __drawStmt(self):
if self.__accept(SymType.draw) :
if self.symbol in self.predefFigures:
figureType = self.symbol
self.__nextSymbol()
name = self.scanner.spell
self.__expect(SymType.ident)
position = self.__position()
if figureType == SymType.circle:
size = self.__number()
color = self.symbol
if self.symbol in self.colors:
self.__nextSymbol()
else:
self.__syntaxError1(self.symbol)
fig = typez.Circle(name, position, color, size, window)
elif figureType == SymType.square:
size = self.__number()
color = self.symbol
if self.symbol in self.colors:
self.__nextSymbol()
else:
self.__syntaxError1(self.symbol)
fig = typez.Square(name, position, color, size, window)
elif figureType == SymType.point:
color = self.symbol
if self.symbol in self.colors:
self.__nextSymbol()
else:
self.__syntaxError1(self.symbol)
fig = typez.Point(name, position, color, window)
elif figureType == SymType.line:
positionEnd = self.__position()
color = self.symbol
if self.symbol in self.colors:
self.__nextSymbol()
else:
self.__syntaxError1(self.symbol)
fig = typez.Line(name, position, positionEnd, color, window)
return stmts.DrawWithAssignStmt(name, fig)
elif self.symbol == SymType.ident:
name = self.scanner.spell
self.__expect(SymType.ident)
return stmts.DrawStmt(name)
else:
self.__syntaxError1(self.symbol)
elif self.symbol == SymType.ident:
name = self.scanner.spell
self.__expect(SymType.ident)
return stmts.DrawStmt(name)
else:
return None
def __moveStmt(self):
if self.__accept(SymType.move) :
name = self.scanner.spell
if self.__expect(SymType.ident):
position = self.__position()
return stmts.MoveStmt(name, position)
else:
self.__syntaxError1(self.symbol)
else:
return None
def __printStmt(self):
if self.__accept(SymType.printz):
self.__accept(SymType.charconst)
return stmts.Printz(self.scanner.strConst)
else:
return None
def __condition(self):
lvalue = self.__number()
relation = SymType.others
if self.symbol in self.relops:
relation = self.symbol
self.__nextSymbol()
else:
self.__syntaxError1(self.symbol)
rvalue = self.__number()
return stmts.Condition(lvalue, relation, rvalue)
def __block(self):
self.__expect(SymType.beginsy)
i = 0
statements = []
while self.symbol != SymType.others and self.symbol != SymType.endsy:
statements.extend([self.__statement()])
i = i + 1
if i > 1000:
raise Exception("Zbyt wiele instrukcji w bloku.")
self.__expect(SymType.endsy)
return statements
def __number(self):
if self.symbol == SymType.intconst:
value = self.scanner.intConst
self.__nextSymbol()
elif self.symbol == SymType.ident:
value = self.scanner.spell
self.__nextSymbol()
else :
self.__syntaxError1(self.symbol)
return value
def __position(self):
self.__expect(SymType.lparent)
firstCoord = self.__number()
self.__expect(SymType.comma)
secondCoord = self.__number()
self.__expect(SymType.rparent)
return typez.Position(firstCoord, secondCoord)
class Interpreter(object):
def __init__(self, parser):
super(Interpreter, self).__init__()
self.parser = parser
def run(self):
self.parser.program()
scope = {}
window.fill((250, 250, 250))
for s in self.parser.statementList:
scope = s.execute(scope)
pygame.display.flip()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
i = Interpreter(Parser(sys.argv[1]))
i.run()
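# The Parser above is a classic accept/expect recursive-descent parser.
# A minimal standalone sketch of the pattern (the token stream and the tiny
# position grammar here are illustrative, not the scanner's real symbols):
class MiniParser(object):
    def __init__(self, tokens):
        self.tokens = list(tokens)
        self.pos = 0
    def _symbol(self):
        return self.tokens[self.pos] if self.pos < len(self.tokens) else 'EOF'
    def _accept(self, atom):
        # Consume the current symbol if it matches and report whether it did.
        if self._symbol() == atom:
            self.pos += 1
            return True
        return False
    def _expect(self, atom):
        # Like _accept, but a mismatch is a syntax error.
        if not self._accept(atom):
            raise SyntaxError('Expected symbol: %s, got: %s' % (atom, self._symbol()))
    def _number(self):
        tok = self._symbol()
        if not isinstance(tok, int):
            raise SyntaxError('Expected a number, got: %s' % (tok,))
        self.pos += 1
        return tok
    def position(self):
        # position := '(' number ',' number ')'
        self._expect('(')
        x = self._number()
        self._expect(',')
        y = self._number()
        self._expect(')')
        return (x, y)

assert MiniParser(['(', 3, ',', 4, ')']).position() == (3, 4)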
| null |
parser.py
|
parser.py
|
py
| 7,664 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.init",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "scanner.Scanner",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.eqop",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.ltop",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType.leop",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType.geop",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType.gtop",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType.circle",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.square",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType.point",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType.line",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType.black",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.notblack",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType.others",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.ident",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.becomes",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "stmts.AssignmentStmt",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.beginsy",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "typez.MyFigure",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "stmts.FigureAssignmentStmt",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.increment",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.ident",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "stmts.IncrementStmt",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.decrement",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.ident",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "stmts.DecrementStmt",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.whilesy",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "stmts.WhileStmt",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.ifsy",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "stmts.IfStmt",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.draw",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.ident",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.circle",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "typez.Circle",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.square",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "typez.Square",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.point",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "typez.Point",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.line",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "typez.Line",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "stmts.DrawWithAssignStmt",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.ident",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.ident",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "stmts.DrawStmt",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.ident",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.ident",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "stmts.DrawStmt",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.move",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.ident",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "stmts.MoveStmt",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.printz",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.charconst",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "stmts.Printz",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.others",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "stmts.Condition",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "symtype.SymType.beginsy",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.others",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.endsy",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType.endsy",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.intconst",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.ident",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.lparent",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.comma",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "symtype.SymType.rparent",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "symtype.SymType",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "typez.Position",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "pygame.display.flip",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 275,
"usage_type": "attribute"
}
] |
283272705
|
import attr
from pandas.core.util.hashing import hash_pandas_object
from dbnd._core.utils import json_utils
from dbnd._vendor import fast_hasher
from targets.value_meta import ValueMeta, ValueMetaConf
from targets.values.pandas_values import DataFrameValueType
class TestDataFrameValueType(object):
def test_df_value_meta(self, pandas_data_frame):
expected_data_schema = {
"type": DataFrameValueType.type_str,
"columns": list(pandas_data_frame.columns),
"size.bytes": int(pandas_data_frame.size),
"shape": pandas_data_frame.shape,
"dtypes": {
col: str(type_) for col, type_ in pandas_data_frame.dtypes.items()
},
}
meta_conf = ValueMetaConf.enabled()
expected_value_meta = ValueMeta(
value_preview=DataFrameValueType().to_preview(
pandas_data_frame, preview_size=meta_conf.get_preview_size()
),
data_dimensions=pandas_data_frame.shape,
data_schema=expected_data_schema,
data_hash=fast_hasher.hash(
hash_pandas_object(pandas_data_frame, index=True).values
),
)
df_value_meta = DataFrameValueType().get_value_meta(
pandas_data_frame, meta_conf=meta_conf
)
assert df_value_meta.value_preview == expected_value_meta.value_preview
assert df_value_meta.data_hash == expected_value_meta.data_hash
assert json_utils.dumps(df_value_meta.data_schema) == json_utils.dumps(
expected_value_meta.data_schema
)
assert df_value_meta.data_dimensions == expected_value_meta.data_dimensions
assert df_value_meta.data_schema == expected_value_meta.data_schema
# histograms and stats are tested in histogram tests and they change a lot, no need to test also here
assert set(df_value_meta.descriptive_stats.keys()) == {"Names", "Births"}
assert set(df_value_meta.histograms.keys()) == {"Names", "Births"}
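# The data_hash assertion above builds on pandas' row hashing. A minimal
# sketch of that building block via the public pandas API (the frame here is
# a made-up stand-in for the pandas_data_frame fixture):
import pandas as pd

def stable_frame_hash(df):
    # hash_pandas_object yields one uint64 per row (index included); hashing
    # the raw bytes of that array collapses it to a single digest that is
    # identical for identical data.
    row_hashes = pd.util.hash_pandas_object(df, index=True).values
    return hash(row_hashes.tobytes())

_df = pd.DataFrame({"Names": ["Bob", "Jessica"], "Births": [968, 155]})
assert stable_frame_hash(_df) == stable_frame_hash(_df.copy())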
| null |
modules/dbnd/test_dbnd/targets_tests/values/test_dataframe_value_type.py
|
test_dataframe_value_type.py
|
py
| 2,039 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "targets.values.pandas_values.DataFrameValueType.type_str",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "targets.values.pandas_values.DataFrameValueType",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "targets.value_meta.ValueMetaConf.enabled",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "targets.value_meta.ValueMetaConf",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "targets.value_meta.ValueMeta",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "targets.values.pandas_values.DataFrameValueType",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "dbnd._vendor.fast_hasher.hash",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "dbnd._vendor.fast_hasher",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pandas.core.util.hashing.hash_pandas_object",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "targets.values.pandas_values.DataFrameValueType",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "dbnd._core.utils.json_utils.dumps",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "dbnd._core.utils.json_utils",
"line_number": 41,
"usage_type": "name"
}
] |
292873195
|
import os
import sys
import json
import argparse
from allennlp.commands.subcommand import Subcommand
from allennlp.common.util import JsonDict
class ConvertTREC(Subcommand):
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
# pylint: disable=protected-access
description = '''Convert JSON output to TREC or TSV output'''
subparser = parser.add_parser(
name, description=description, help='Convert JSON output to TREC or TSV.')
subparser.add_argument('--input', type=str, help='input file', required=True)
subparser.add_argument('--output', type=str, default='', help='output file or directory')
subparser.add_argument('--corrections', type=str, default='', help='corrections file for empty queries')
subparser.add_argument('--type', type=str, default='trec', help='output type [trec, tsv]')
subparser.set_defaults(func=_convert)
return subparser
def _convert(args: argparse.Namespace) -> None:
output_type = 'file'
if os.path.isdir(args.output):
output_type = 'dir'
else:
fp = open(args.output, 'w')
fp.close()
files = [args.input]
if args.corrections != '':
files.append(args.corrections)
for file in files:
with open(file) as fp:
for line in fp:
query = json.loads(line)
process_query(query, args.output, args.type, output_type)
def process_query(query: JsonDict, output: str, doctype: str = 'trec', output_type: str = 'file') -> None:
query_id = query['query_id']
if output_type == 'file':
fp = open(output, 'a')
else:
fp = open(os.path.join(output, 'q-' + query_id + '.' + doctype), 'w')
if doctype == 'tsv': fp.write(f'{query_id}\tquery\n')
if 'scores' in query:
for i, (doc_id, score) in enumerate(query['scores']):
if doctype == 'trec':
fp.write(f'{query_id}\tQ0\t{doc_id}\t{i+1}\t{score}\tneuclir\n')
elif doctype == 'tsv':
fp.write(f'{doc_id}\t{score}\n')
fp.close()
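# process_query writes the standard TREC run format, one line per scored
# document: "qid Q0 docid rank score run_tag", tab-separated, with 1-based
# ranks. A minimal sketch with made-up scores:
def trec_lines(query_id, scores, tag='neuclir'):
    # scores: (doc_id, score) pairs already sorted best-first
    return [f'{query_id}\tQ0\t{doc_id}\t{rank}\t{score}\t{tag}'
            for rank, (doc_id, score) in enumerate(scores, start=1)]

assert trec_lines('q1', [('doc7', 12.3)]) == ['q1\tQ0\tdoc7\t1\t12.3\tneuclir']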
| null |
neuclir/commands/convert_trec.py
|
convert_trec.py
|
py
| 2,147 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "allennlp.commands.subcommand.Subcommand",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "argparse._SubParsersAction",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "argparse.Namespace",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "allennlp.common.util.JsonDict",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 51,
"usage_type": "attribute"
}
] |
111808028
|
# coding: utf-8
# In[1]:
import preprocess as read_files
import glob
import numpy as np
# # read file
# In[2]:
ucf = read_files.UCF()
fea = glob.glob('ucf101_fea_video/*.mat')
ucf.fill(fea)
ucf.dump('fea.ucf')
ucf_test = read_files.UCF()
ucf_test.svd_data = list()
feaval = glob.glob('ucf101_fea_val_video/*.mat')
ucf_test.fill(feaval)
ucf_test.dump('feaval.ucf')
# # cluster
# In[4]:
from sklearn.cluster import KMeans
def kmeans_method(data):
X =data[0]
kmeans = KMeans(9).fit(X)
eleven = [[],[]]
existed = []
for i in range(len(kmeans.labels_)):
if kmeans.labels_[i] in existed:
pass
else:
eleven[0].append(X[i])
eleven[1].append(data[1][i])
existed.append(kmeans.labels_[i])
return eleven
ucf.eleven_data = []
for data in ucf.data:
tmp = kmeans_method(data)
ucf.eleven_data.append(tmp)
ucf_test.eleven_data = []
for data in ucf_test.data:
tmp= kmeans_method(data)
ucf_test.eleven_data.append(tmp)
# # neural network
# In[8]:
from mxnet import nd, gluon, init, autograd
from mxnet.gluon import nn
from mxnet.gluon.data.vision import datasets, transforms
import matplotlib.pyplot as plt
from time import time
# In[9]:
svd_data = []
for ele in ucf.eleven_data:
svd_data.append(nd.array(ele))
train_label =[]
for ele in ucf.label:
train_label.append(int(ele))
# In[30]:
ucf_test_data =[]
for ele in ucf_test.eleven_data:
ucf_test_data.append(nd.array(ele))
test_label = []
for ele in ucf_test.label:
test_label.append(int(ele))
# materialize the iterator so DataLoader can index it
test_samples = list(zip(ucf_test_data, test_label))
# In[10]:
for ele in svd_data:
print(type(ele))
break
# In[11]:
import numpy as np
# zip() returns an iterator in Python 3; materialize it so it can be
# shuffled and sliced
samples = list(zip(svd_data, train_label))
np.random.shuffle(samples)
train_dataset = samples[:int(len(ucf.label)*0.9)]
valid_dataset = samples[int(len(ucf.label)*0.9):]
# def ndify(dataset):
# for ele in dataset:
# ele[0] = nd.array(np.asarray(ele[0]))
# ndify(train_dataset)
# ndify(valid_dataset)
# ndify(test_dataset)
# for elem in train_dataset:
# print(type(elem))
# break
# In[12]:
batch_size=256
train_data = gluon.data.DataLoader(train_dataset, batch_size=batch_size,shuffle=True,num_workers=4)
valid_data = gluon.data.DataLoader(valid_dataset, batch_size=batch_size,shuffle=True,num_workers=4)
test_data = gluon.data.DataLoader(test_samples, batch_size=batch_size,shuffle=True,num_workers=4)
# In[13]:
for data, label in train_data:
print(data.shape, label.shape)
break
# In[19]:
net = nn.Sequential()
net.add(
nn.Conv2D(channels=4, kernel_size=1, activation='relu'),
nn.MaxPool2D(pool_size=2, strides=2),
#nn.Dropout(0.8),
nn.Conv2D(channels=4, kernel_size=1, activation='relu'),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Flatten(),
#nn.Dense(4096,activation='relu'),
nn.Dropout(.1),
nn.Dense(1024, activation="relu"),
nn.Dropout(.1),
nn.Dense(128, activation="relu"),
nn.Dropout(.8),
nn.Dense(52))
net.initialize(init=init.Xavier())
# from mxnet.gluon.model_zoo import vision
#
# net = vision.alexnet(pretrained=False,classes=50)
# #net.add(nn.Dense(50))
# net.initialize(init=init.Xavier())
# In[20]:
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
# In[22]:
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
# In[54]:
def acc(output, label):
# output: (batch, num_output) float32 ndarray
# label: (batch, ) int32 ndarray
return (output.argmax(axis=1) ==
label.astype('float32')).mean().asscalar()
# In[62]:
for epoch in range(10):
train_loss, train_acc, valid_acc = 0., 0., 0.
tic = time()
for data, label in train_data:
# forward + backward
with autograd.record():
output = net(data)
loss = softmax_cross_entropy(output, label)
loss.backward()
# update parameters
trainer.step(batch_size)
# calculate training metrics
train_loss += loss.mean().asscalar()
train_acc += acc(output, label)
# calculate validation accuracy
for data, label in valid_data:
valid_acc += acc(net(data), label)
print("Epoch %d: Loss: %.3f, Train acc %.3f, valid acc %.3f, Time %.1f sec" % (
epoch, train_loss/len(train_data),
train_acc/len(train_data),
valid_acc/len(valid_data), time()-tic))
# In[75]:
test_acc = 0.
for data, label in test_data:
test_acc += acc(net(data), label)
print("Test acc %.3f "%(test_acc/len(test_data)))
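# kmeans_method above keeps the first feature vector seen for each k-means
# cluster, i.e. one representative frame per cluster. A self-contained
# sketch of that selection on synthetic data (cluster count is arbitrary):
from sklearn.cluster import KMeans
import numpy as np

def representatives(X, n_clusters=3):
    labels = KMeans(n_clusters=n_clusters, n_init=10).fit(X).labels_
    seen, picks = set(), []
    for i, lab in enumerate(labels):
        if lab not in seen:
            seen.add(lab)
            picks.append(X[i])
    return np.array(picks)

print(representatives(np.random.rand(50, 8)).shape)  # (3, 8)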
| null |
svd_method.py
|
svd_method.py
|
py
| 4,629 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "preprocess.UCF",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "preprocess.UCF",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "mxnet.nd.array",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "mxnet.nd",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "mxnet.nd.array",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "mxnet.nd",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "mxnet.gluon.data.DataLoader",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.data",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "mxnet.gluon",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.data.DataLoader",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.data",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "mxnet.gluon",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.data.DataLoader",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.data",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "mxnet.gluon",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.Sequential",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.Conv2D",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.MaxPool2D",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.Conv2D",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.MaxPool2D",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.Flatten",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.Dropout",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.Dense",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.Dropout",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.Dense",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.Dropout",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.nn.Dense",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.nn",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "mxnet.init.Xavier",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "mxnet.init",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.loss.SoftmaxCrossEntropyLoss",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.loss",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "mxnet.gluon",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "mxnet.gluon.Trainer",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "mxnet.autograd.record",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "mxnet.autograd",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 219,
"usage_type": "call"
}
] |
79197329
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import MutableMapping, MutableSequence
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import money_pb2 # type: ignore
from google.type import postal_address_pb2 # type: ignore
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.domains.v1",
manifest={
"ContactPrivacy",
"DomainNotice",
"ContactNotice",
"TransferLockState",
"Registration",
"ManagementSettings",
"DnsSettings",
"ContactSettings",
"SearchDomainsRequest",
"SearchDomainsResponse",
"RetrieveRegisterParametersRequest",
"RetrieveRegisterParametersResponse",
"RegisterDomainRequest",
"RetrieveTransferParametersRequest",
"RetrieveTransferParametersResponse",
"TransferDomainRequest",
"ListRegistrationsRequest",
"ListRegistrationsResponse",
"GetRegistrationRequest",
"UpdateRegistrationRequest",
"ConfigureManagementSettingsRequest",
"ConfigureDnsSettingsRequest",
"ConfigureContactSettingsRequest",
"ExportRegistrationRequest",
"DeleteRegistrationRequest",
"RetrieveAuthorizationCodeRequest",
"ResetAuthorizationCodeRequest",
"RegisterParameters",
"TransferParameters",
"AuthorizationCode",
"OperationMetadata",
},
)
class ContactPrivacy(proto.Enum):
r"""Defines a set of possible contact privacy settings for a
``Registration``.
`ICANN <https://icann.org/>`__ maintains the WHOIS database, a
publicly accessible mapping from domain name to contact information,
and requires that each domain name have an entry. Choose from these
options to control how much information in your ``ContactSettings``
is published.
Values:
CONTACT_PRIVACY_UNSPECIFIED (0):
The contact privacy settings are undefined.
PUBLIC_CONTACT_DATA (1):
All the data from ``ContactSettings`` is publicly available.
When setting this option, you must also provide a
``PUBLIC_CONTACT_DATA_ACKNOWLEDGEMENT`` in the
``contact_notices`` field of the request.
PRIVATE_CONTACT_DATA (2):
None of the data from ``ContactSettings`` is publicly
available. Instead, proxy contact data is published for your
domain. Email sent to the proxy email address is forwarded
to the registrant's email address. Cloud Domains provides
this privacy proxy service at no additional cost.
REDACTED_CONTACT_DATA (3):
Some data from ``ContactSettings`` is publicly available.
The actual information redacted depends on the domain. For
details, see `the registration privacy
article <https://support.google.com/domains/answer/3251242>`__.
"""
CONTACT_PRIVACY_UNSPECIFIED = 0
PUBLIC_CONTACT_DATA = 1
PRIVATE_CONTACT_DATA = 2
REDACTED_CONTACT_DATA = 3
class DomainNotice(proto.Enum):
r"""Notices about special properties of certain domains.
Values:
DOMAIN_NOTICE_UNSPECIFIED (0):
The notice is undefined.
HSTS_PRELOADED (1):
Indicates that the domain is preloaded on the HTTP Strict
Transport Security list in browsers. Serving a website on
such domain requires an SSL certificate. For details, see
`how to get an SSL
certificate <https://support.google.com/domains/answer/7638036>`__.
"""
DOMAIN_NOTICE_UNSPECIFIED = 0
HSTS_PRELOADED = 1
class ContactNotice(proto.Enum):
r"""Notices related to contact information.
Values:
CONTACT_NOTICE_UNSPECIFIED (0):
The notice is undefined.
PUBLIC_CONTACT_DATA_ACKNOWLEDGEMENT (1):
Required when setting the ``privacy`` field of
``ContactSettings`` to ``PUBLIC_CONTACT_DATA``, which
exposes contact data publicly.
"""
CONTACT_NOTICE_UNSPECIFIED = 0
PUBLIC_CONTACT_DATA_ACKNOWLEDGEMENT = 1
class TransferLockState(proto.Enum):
r"""Possible states of a ``Registration``'s transfer lock.
Values:
TRANSFER_LOCK_STATE_UNSPECIFIED (0):
The state is unspecified.
UNLOCKED (1):
The domain is unlocked and can be transferred
to another registrar.
LOCKED (2):
The domain is locked and cannot be
transferred to another registrar.
"""
TRANSFER_LOCK_STATE_UNSPECIFIED = 0
UNLOCKED = 1
LOCKED = 2
class Registration(proto.Message):
r"""The ``Registration`` resource facilitates managing and configuring
domain name registrations.
There are several ways to create a new ``Registration`` resource:
To create a new ``Registration`` resource, find a suitable domain
name by calling the ``SearchDomains`` method with a query to see
available domain name options. After choosing a name, call
``RetrieveRegisterParameters`` to ensure availability and obtain
information like pricing, which is needed to build a call to
``RegisterDomain``.
Another way to create a new ``Registration`` is to transfer an
existing domain from another registrar. First, go to the current
registrar to unlock the domain for transfer and retrieve the
domain's transfer authorization code. Then call
``RetrieveTransferParameters`` to confirm that the domain is
unlocked and to get values needed to build a call to
``TransferDomain``.
Attributes:
name (str):
Output only. Name of the ``Registration`` resource, in the
format
``projects/*/locations/*/registrations/<domain_name>``.
domain_name (str):
Required. Immutable. The domain name. Unicode
domain names must be expressed in Punycode
format.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The creation timestamp of the ``Registration``
resource.
expire_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The expiration timestamp of the
``Registration``.
state (google.cloud.domains_v1.types.Registration.State):
Output only. The state of the ``Registration``
issues (MutableSequence[google.cloud.domains_v1.types.Registration.Issue]):
Output only. The set of issues with the ``Registration``
that require attention.
labels (MutableMapping[str, str]):
Set of labels associated with the ``Registration``.
management_settings (google.cloud.domains_v1.types.ManagementSettings):
Settings for management of the ``Registration``, including
renewal, billing, and transfer. You cannot update these with
the ``UpdateRegistration`` method. To update these settings,
use the ``ConfigureManagementSettings`` method.
dns_settings (google.cloud.domains_v1.types.DnsSettings):
Settings controlling the DNS configuration of the
``Registration``. You cannot update these with the
``UpdateRegistration`` method. To update these settings, use
the ``ConfigureDnsSettings`` method.
contact_settings (google.cloud.domains_v1.types.ContactSettings):
Required. Settings for contact information linked to the
``Registration``. You cannot update these with the
``UpdateRegistration`` method. To update these settings, use
the ``ConfigureContactSettings`` method.
pending_contact_settings (google.cloud.domains_v1.types.ContactSettings):
Output only. Pending contact settings for the
``Registration``. Updates to the ``contact_settings`` field
that change its ``registrant_contact`` or ``privacy`` fields
require email confirmation by the ``registrant_contact``
before taking effect. This field is set only if there are
pending updates to the ``contact_settings`` that have not
been confirmed. To confirm the changes, the
``registrant_contact`` must follow the instructions in the
email they receive.
supported_privacy (MutableSequence[google.cloud.domains_v1.types.ContactPrivacy]):
Output only. Set of options for the
``contact_settings.privacy`` field that this
``Registration`` supports.
"""
class State(proto.Enum):
r"""Possible states of a ``Registration``.
Values:
STATE_UNSPECIFIED (0):
The state is undefined.
REGISTRATION_PENDING (1):
The domain is being registered.
REGISTRATION_FAILED (2):
The domain registration failed. You can
delete resources in this state to allow
registration to be retried.
TRANSFER_PENDING (3):
The domain is being transferred from another
registrar to Cloud Domains.
TRANSFER_FAILED (4):
The attempt to transfer the domain from
another registrar to Cloud Domains failed. You
can delete resources in this state and retry the
transfer.
ACTIVE (6):
The domain is registered and operational. The
domain renews automatically as long as it
remains in this state.
SUSPENDED (7):
The domain is suspended and inoperative. For more details,
see the ``issues`` field.
EXPORTED (8):
The domain is no longer managed with Cloud Domains. It may
have been transferred to another registrar or exported for
management in `Google Domains <https://domains.google/>`__.
You can no longer update it with this API, and information
shown about it may be stale. Domains in this state are not
automatically renewed by Cloud Domains.
"""
STATE_UNSPECIFIED = 0
REGISTRATION_PENDING = 1
REGISTRATION_FAILED = 2
TRANSFER_PENDING = 3
TRANSFER_FAILED = 4
ACTIVE = 6
SUSPENDED = 7
EXPORTED = 8
class Issue(proto.Enum):
r"""Possible issues with a ``Registration`` that require attention.
Values:
ISSUE_UNSPECIFIED (0):
The issue is undefined.
CONTACT_SUPPORT (1):
Contact the Cloud Support team to resolve a
problem with this domain.
UNVERIFIED_EMAIL (2):
`ICANN <https://icann.org/>`__ requires verification of the
email address in the ``Registration``'s
``contact_settings.registrant_contact`` field. To verify the
email address, follow the instructions in the email the
``registrant_contact`` receives following registration. If
you do not complete email verification within 15 days of
registration, the domain is suspended. To resend the
verification email, call ConfigureContactSettings and
provide the current ``registrant_contact.email``.
"""
ISSUE_UNSPECIFIED = 0
CONTACT_SUPPORT = 1
UNVERIFIED_EMAIL = 2
name: str = proto.Field(
proto.STRING,
number=1,
)
domain_name: str = proto.Field(
proto.STRING,
number=2,
)
create_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
expire_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=6,
message=timestamp_pb2.Timestamp,
)
state: State = proto.Field(
proto.ENUM,
number=7,
enum=State,
)
issues: MutableSequence[Issue] = proto.RepeatedField(
proto.ENUM,
number=8,
enum=Issue,
)
labels: MutableMapping[str, str] = proto.MapField(
proto.STRING,
proto.STRING,
number=9,
)
management_settings: "ManagementSettings" = proto.Field(
proto.MESSAGE,
number=10,
message="ManagementSettings",
)
dns_settings: "DnsSettings" = proto.Field(
proto.MESSAGE,
number=11,
message="DnsSettings",
)
contact_settings: "ContactSettings" = proto.Field(
proto.MESSAGE,
number=12,
message="ContactSettings",
)
pending_contact_settings: "ContactSettings" = proto.Field(
proto.MESSAGE,
number=13,
message="ContactSettings",
)
supported_privacy: MutableSequence["ContactPrivacy"] = proto.RepeatedField(
proto.ENUM,
number=14,
enum="ContactPrivacy",
)
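# Illustrative sketch (not part of the generated types): the registration
# flow described in the Registration docstring above, using the client from
# this package. The project name is a placeholder, and the flattened
# search_domains(location=..., query=...) signature is assumed from the
# request fields defined below.
def _example_search_flow(project: str = "my-project") -> None:
    from google.cloud import domains_v1  # deferred import; avoids a cycle with this types module

    client = domains_v1.DomainsClient()
    location = f"projects/{project}/locations/global"
    # 1. Find candidate domain names.
    response = client.search_domains(location=location, query="example")
    for params in response.register_parameters:
        # 2. A suitable candidate would then go through
        #    RetrieveRegisterParameters and RegisterDomain.
        print(params.domain_name)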
class ManagementSettings(proto.Message):
r"""Defines renewal, billing, and transfer settings for a
``Registration``.
Attributes:
renewal_method (google.cloud.domains_v1.types.ManagementSettings.RenewalMethod):
Output only. The renewal method for this ``Registration``.
transfer_lock_state (google.cloud.domains_v1.types.TransferLockState):
Controls whether the domain can be
transferred to another registrar.
"""
class RenewalMethod(proto.Enum):
r"""Defines how the ``Registration`` is renewed.
Values:
RENEWAL_METHOD_UNSPECIFIED (0):
The renewal method is undefined.
AUTOMATIC_RENEWAL (1):
The domain is automatically renewed each year.
To disable automatic renewals, delete the resource by
calling ``DeleteRegistration`` or export it by calling
``ExportRegistration``.
MANUAL_RENEWAL (2):
The domain must be explicitly renewed each year before its
``expire_time``. This option is only available when the
``Registration`` is in state ``EXPORTED``.
To manage the domain's current billing and renewal settings,
go to `Google Domains <https://domains.google/>`__.
"""
RENEWAL_METHOD_UNSPECIFIED = 0
AUTOMATIC_RENEWAL = 1
MANUAL_RENEWAL = 2
renewal_method: RenewalMethod = proto.Field(
proto.ENUM,
number=3,
enum=RenewalMethod,
)
transfer_lock_state: "TransferLockState" = proto.Field(
proto.ENUM,
number=4,
enum="TransferLockState",
)
class DnsSettings(proto.Message):
r"""Defines the DNS configuration of a ``Registration``, including name
servers, DNSSEC, and glue records.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
custom_dns (google.cloud.domains_v1.types.DnsSettings.CustomDns):
An arbitrary DNS provider identified by its
name servers.
This field is a member of `oneof`_ ``dns_provider``.
google_domains_dns (google.cloud.domains_v1.types.DnsSettings.GoogleDomainsDns):
The free DNS zone provided by `Google
Domains <https://domains.google/>`__.
This field is a member of `oneof`_ ``dns_provider``.
glue_records (MutableSequence[google.cloud.domains_v1.types.DnsSettings.GlueRecord]):
The list of glue records for this ``Registration``. Commonly
empty.
"""
class DsState(proto.Enum):
r"""The publication state of DS records for a ``Registration``.
Values:
DS_STATE_UNSPECIFIED (0):
DS state is unspecified.
DS_RECORDS_UNPUBLISHED (1):
DNSSEC is disabled for this domain. No DS
records for this domain are published in the
parent DNS zone.
DS_RECORDS_PUBLISHED (2):
DNSSEC is enabled for this domain. Appropriate DS records
for this domain are published in the parent DNS zone. This
option is valid only if the DNS zone referenced in the
``Registration``'s ``dns_provider`` field is already
DNSSEC-signed.
"""
DS_STATE_UNSPECIFIED = 0
DS_RECORDS_UNPUBLISHED = 1
DS_RECORDS_PUBLISHED = 2
class CustomDns(proto.Message):
r"""Configuration for an arbitrary DNS provider.
Attributes:
name_servers (MutableSequence[str]):
Required. A list of name servers that store
the DNS zone for this domain. Each name server
is a domain name, with Unicode domain names
expressed in Punycode format.
ds_records (MutableSequence[google.cloud.domains_v1.types.DnsSettings.DsRecord]):
The list of DS records for this domain, which
are used to enable DNSSEC. The domain's DNS
provider can provide the values to set here. If
this field is empty, DNSSEC is disabled.
"""
name_servers: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
ds_records: MutableSequence["DnsSettings.DsRecord"] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="DnsSettings.DsRecord",
)
class GoogleDomainsDns(proto.Message):
r"""Configuration for using the free DNS zone provided by Google Domains
as a ``Registration``'s ``dns_provider``. You cannot configure the
DNS zone itself using the API. To configure the DNS zone, go to
`Google Domains <https://domains.google/>`__.
Attributes:
name_servers (MutableSequence[str]):
Output only. A list of name servers that
store the DNS zone for this domain. Each name
server is a domain name, with Unicode domain
names expressed in Punycode format. This field
is automatically populated with the name servers
assigned to the Google Domains DNS zone.
ds_state (google.cloud.domains_v1.types.DnsSettings.DsState):
Required. The state of DS records for this
domain. Used to enable or disable automatic
DNSSEC.
ds_records (MutableSequence[google.cloud.domains_v1.types.DnsSettings.DsRecord]):
Output only. The list of DS records published for this
domain. The list is automatically populated when
``ds_state`` is ``DS_RECORDS_PUBLISHED``, otherwise it
remains empty.
"""
name_servers: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
ds_state: "DnsSettings.DsState" = proto.Field(
proto.ENUM,
number=2,
enum="DnsSettings.DsState",
)
ds_records: MutableSequence["DnsSettings.DsRecord"] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="DnsSettings.DsRecord",
)
class DsRecord(proto.Message):
r"""Defines a Delegation Signer (DS) record, which is needed to
enable DNSSEC for a domain. It contains a digest (hash) of a
DNSKEY record that must be present in the domain's DNS zone.
Attributes:
key_tag (int):
The key tag of the record. Must be set in
range 0 -- 65535.
algorithm (google.cloud.domains_v1.types.DnsSettings.DsRecord.Algorithm):
The algorithm used to generate the referenced
DNSKEY.
digest_type (google.cloud.domains_v1.types.DnsSettings.DsRecord.DigestType):
The hash function used to generate the digest
of the referenced DNSKEY.
digest (str):
The digest generated from the referenced
DNSKEY.
"""
class Algorithm(proto.Enum):
r"""List of algorithms used to create a DNSKEY. Certain
algorithms are not supported for particular domains.
Values:
ALGORITHM_UNSPECIFIED (0):
The algorithm is unspecified.
RSAMD5 (1):
RSA/MD5. Cannot be used for new deployments.
DH (2):
Diffie-Hellman. Cannot be used for new
deployments.
DSA (3):
DSA/SHA1. Not recommended for new
deployments.
ECC (4):
ECC. Not recommended for new deployments.
RSASHA1 (5):
RSA/SHA-1. Not recommended for new
deployments.
DSANSEC3SHA1 (6):
DSA-NSEC3-SHA1. Not recommended for new
deployments.
RSASHA1NSEC3SHA1 (7):
RSA/SHA1-NSEC3-SHA1. Not recommended for new
deployments.
RSASHA256 (8):
RSA/SHA-256.
RSASHA512 (10):
RSA/SHA-512.
ECCGOST (12):
GOST R 34.10-2001.
ECDSAP256SHA256 (13):
ECDSA Curve P-256 with SHA-256.
ECDSAP384SHA384 (14):
ECDSA Curve P-384 with SHA-384.
ED25519 (15):
Ed25519.
ED448 (16):
Ed448.
INDIRECT (252):
Reserved for Indirect Keys. Cannot be used
for new deployments.
PRIVATEDNS (253):
Private algorithm. Cannot be used for new
deployments.
PRIVATEOID (254):
Private algorithm OID. Cannot be used for new
deployments.
"""
ALGORITHM_UNSPECIFIED = 0
RSAMD5 = 1
DH = 2
DSA = 3
ECC = 4
RSASHA1 = 5
DSANSEC3SHA1 = 6
RSASHA1NSEC3SHA1 = 7
RSASHA256 = 8
RSASHA512 = 10
ECCGOST = 12
ECDSAP256SHA256 = 13
ECDSAP384SHA384 = 14
ED25519 = 15
ED448 = 16
INDIRECT = 252
PRIVATEDNS = 253
PRIVATEOID = 254
class DigestType(proto.Enum):
r"""List of hash functions that may have been used to generate a
digest of a DNSKEY.
Values:
DIGEST_TYPE_UNSPECIFIED (0):
The DigestType is unspecified.
SHA1 (1):
SHA-1. Not recommended for new deployments.
SHA256 (2):
SHA-256.
GOST3411 (3):
GOST R 34.11-94.
SHA384 (4):
SHA-384.
"""
DIGEST_TYPE_UNSPECIFIED = 0
SHA1 = 1
SHA256 = 2
GOST3411 = 3
SHA384 = 4
key_tag: int = proto.Field(
proto.INT32,
number=1,
)
algorithm: "DnsSettings.DsRecord.Algorithm" = proto.Field(
proto.ENUM,
number=2,
enum="DnsSettings.DsRecord.Algorithm",
)
digest_type: "DnsSettings.DsRecord.DigestType" = proto.Field(
proto.ENUM,
number=3,
enum="DnsSettings.DsRecord.DigestType",
)
digest: str = proto.Field(
proto.STRING,
number=4,
)
class GlueRecord(proto.Message):
r"""Defines a host on your domain that is a DNS name server for your
domain and/or other domains. Glue records are a way of making the IP
address of a name server known, even when it serves DNS queries for
its parent domain. For example, when ``ns.example.com`` is a name
server for ``example.com``, the host ``ns.example.com`` must have a
glue record to break the circular DNS reference.
Attributes:
host_name (str):
Required. Domain name of the host in Punycode
format.
ipv4_addresses (MutableSequence[str]):
List of IPv4 addresses corresponding to this host in the
standard decimal format (e.g. ``198.51.100.1``). At least
one of ``ipv4_address`` and ``ipv6_address`` must be set.
ipv6_addresses (MutableSequence[str]):
List of IPv6 addresses corresponding to this host in the
standard hexadecimal format (e.g. ``2001:db8::``). At least
one of ``ipv4_address`` and ``ipv6_address`` must be set.
"""
host_name: str = proto.Field(
proto.STRING,
number=1,
)
ipv4_addresses: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=2,
)
ipv6_addresses: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=3,
)
custom_dns: CustomDns = proto.Field(
proto.MESSAGE,
number=1,
oneof="dns_provider",
message=CustomDns,
)
google_domains_dns: GoogleDomainsDns = proto.Field(
proto.MESSAGE,
number=2,
oneof="dns_provider",
message=GoogleDomainsDns,
)
glue_records: MutableSequence[GlueRecord] = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=GlueRecord,
)
class ContactSettings(proto.Message):
r"""Defines the contact information associated with a ``Registration``.
`ICANN <https://icann.org/>`__ requires all domain names to have
associated contact information. The ``registrant_contact`` is
considered the domain's legal owner, and often the other contacts
are identical.
Attributes:
privacy (google.cloud.domains_v1.types.ContactPrivacy):
Required. Privacy setting for the contacts associated with
the ``Registration``.
registrant_contact (google.cloud.domains_v1.types.ContactSettings.Contact):
Required. The registrant contact for the ``Registration``.
*Caution: Anyone with access to this email address, phone
number, and/or postal address can take control of the
domain.*
*Warning: For new ``Registration``\ s, the registrant
receives an email confirmation that they must complete
within 15 days to avoid domain suspension.*
admin_contact (google.cloud.domains_v1.types.ContactSettings.Contact):
Required. The administrative contact for the
``Registration``.
technical_contact (google.cloud.domains_v1.types.ContactSettings.Contact):
Required. The technical contact for the ``Registration``.
"""
class Contact(proto.Message):
r"""Details required for a contact associated with a ``Registration``.
Attributes:
postal_address (google.type.postal_address_pb2.PostalAddress):
Required. Postal address of the contact.
email (str):
Required. Email address of the contact.
phone_number (str):
Required. Phone number of the contact in international
format. For example, ``"+1-800-555-0123"``.
fax_number (str):
Fax number of the contact in international format. For
example, ``"+1-800-555-0123"``.
"""
postal_address: postal_address_pb2.PostalAddress = proto.Field(
proto.MESSAGE,
number=1,
message=postal_address_pb2.PostalAddress,
)
email: str = proto.Field(
proto.STRING,
number=2,
)
phone_number: str = proto.Field(
proto.STRING,
number=3,
)
fax_number: str = proto.Field(
proto.STRING,
number=4,
)
privacy: "ContactPrivacy" = proto.Field(
proto.ENUM,
number=1,
enum="ContactPrivacy",
)
registrant_contact: Contact = proto.Field(
proto.MESSAGE,
number=2,
message=Contact,
)
admin_contact: Contact = proto.Field(
proto.MESSAGE,
number=3,
message=Contact,
)
technical_contact: Contact = proto.Field(
proto.MESSAGE,
number=4,
message=Contact,
)
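# Illustrative sketch: populating a Contact with its required fields, then a
# full ContactSettings. Assumes the ContactPrivacy enum defined earlier in this
# module; only a couple of google.type.PostalAddress fields are shown.
#
#   contact = ContactSettings.Contact(
#       email="contact@example.com",
#       phone_number="+1-800-555-0123",
#       postal_address=postal_address_pb2.PostalAddress(
#           region_code="US",
#           address_lines=["123 Main St"],
#       ),
#   )
#   contact_settings = ContactSettings(
#       privacy=ContactPrivacy.REDACTED_CONTACT_DATA,
#       registrant_contact=contact,
#       admin_contact=contact,
#       technical_contact=contact,
#   )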
class SearchDomainsRequest(proto.Message):
r"""Request for the ``SearchDomains`` method.
Attributes:
query (str):
Required. String used to search for available
domain names.
location (str):
Required. The location. Must be in the format
``projects/*/locations/*``.
"""
query: str = proto.Field(
proto.STRING,
number=1,
)
location: str = proto.Field(
proto.STRING,
number=2,
)
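# Illustrative usage sketch (assumes the generated DomainsClient from
# google.cloud.domains_v1; the client itself lives outside this types module):
#
#   from google.cloud import domains_v1
#   client = domains_v1.DomainsClient()
#   response = client.search_domains(
#       location="projects/my-project/locations/global",
#       query="example.com",
#   )
#   for params in response.register_parameters:
#       print(params.domain_name, params.availability)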
class SearchDomainsResponse(proto.Message):
r"""Response for the ``SearchDomains`` method.
Attributes:
register_parameters (MutableSequence[google.cloud.domains_v1.types.RegisterParameters]):
Results of the domain name search.
"""
register_parameters: MutableSequence["RegisterParameters"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="RegisterParameters",
)
class RetrieveRegisterParametersRequest(proto.Message):
r"""Request for the ``RetrieveRegisterParameters`` method.
Attributes:
domain_name (str):
Required. The domain name. Unicode domain
names must be expressed in Punycode format.
location (str):
Required. The location. Must be in the format
``projects/*/locations/*``.
"""
domain_name: str = proto.Field(
proto.STRING,
number=1,
)
location: str = proto.Field(
proto.STRING,
number=2,
)
class RetrieveRegisterParametersResponse(proto.Message):
r"""Response for the ``RetrieveRegisterParameters`` method.
Attributes:
register_parameters (google.cloud.domains_v1.types.RegisterParameters):
Parameters to use when calling the ``RegisterDomain``
method.
"""
register_parameters: "RegisterParameters" = proto.Field(
proto.MESSAGE,
number=1,
message="RegisterParameters",
)
class RegisterDomainRequest(proto.Message):
r"""Request for the ``RegisterDomain`` method.
Attributes:
parent (str):
Required. The parent resource of the ``Registration``. Must
be in the format ``projects/*/locations/*``.
registration (google.cloud.domains_v1.types.Registration):
Required. The complete ``Registration`` resource to be
created.
domain_notices (MutableSequence[google.cloud.domains_v1.types.DomainNotice]):
The list of domain notices that you acknowledge. Call
``RetrieveRegisterParameters`` to see the notices that need
acknowledgement.
contact_notices (MutableSequence[google.cloud.domains_v1.types.ContactNotice]):
The list of contact notices that the caller acknowledges.
The notices needed here depend on the values specified in
``registration.contact_settings``.
yearly_price (google.type.money_pb2.Money):
Required. Yearly price to register or renew
the domain. The value that should be put here
can be obtained from RetrieveRegisterParameters
or SearchDomains calls.
validate_only (bool):
When true, only validation is performed, without actually
registering the domain. Follows:
https://cloud.google.com/apis/design/design_patterns#request_validation
"""
parent: str = proto.Field(
proto.STRING,
number=1,
)
registration: "Registration" = proto.Field(
proto.MESSAGE,
number=2,
message="Registration",
)
domain_notices: MutableSequence["DomainNotice"] = proto.RepeatedField(
proto.ENUM,
number=3,
enum="DomainNotice",
)
contact_notices: MutableSequence["ContactNotice"] = proto.RepeatedField(
proto.ENUM,
number=4,
enum="ContactNotice",
)
yearly_price: money_pb2.Money = proto.Field(
proto.MESSAGE,
number=5,
message=money_pb2.Money,
)
validate_only: bool = proto.Field(
proto.BOOL,
number=6,
)
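# Illustrative sketch of the price acknowledgement flow described above:
# RetrieveRegisterParameters supplies the yearly_price that RegisterDomain
# expects back verbatim. The client and a populated Registration are assumed.
#
#   params = client.retrieve_register_parameters(
#       location="projects/my-project/locations/global",
#       domain_name="example.com",
#   ).register_parameters
#   request = RegisterDomainRequest(
#       parent="projects/my-project/locations/global",
#       registration=registration,  # a fully populated Registration
#       yearly_price=params.yearly_price,
#   )
#   operation = client.register_domain(request=request)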
class RetrieveTransferParametersRequest(proto.Message):
r"""Request for the ``RetrieveTransferParameters`` method.
Attributes:
domain_name (str):
Required. The domain name. Unicode domain
names must be expressed in Punycode format.
location (str):
Required. The location. Must be in the format
``projects/*/locations/*``.
"""
domain_name: str = proto.Field(
proto.STRING,
number=1,
)
location: str = proto.Field(
proto.STRING,
number=2,
)
class RetrieveTransferParametersResponse(proto.Message):
r"""Response for the ``RetrieveTransferParameters`` method.
Attributes:
transfer_parameters (google.cloud.domains_v1.types.TransferParameters):
Parameters to use when calling the ``TransferDomain``
method.
"""
transfer_parameters: "TransferParameters" = proto.Field(
proto.MESSAGE,
number=1,
message="TransferParameters",
)
class TransferDomainRequest(proto.Message):
r"""Request for the ``TransferDomain`` method.
Attributes:
parent (str):
Required. The parent resource of the ``Registration``. Must
be in the format ``projects/*/locations/*``.
registration (google.cloud.domains_v1.types.Registration):
Required. The complete ``Registration`` resource to be
created.
You can leave ``registration.dns_settings`` unset to import
the domain's current DNS configuration from its current
registrar. Use this option only if you are sure that the
domain's current DNS service does not cease upon transfer,
as is often the case for DNS services provided for free by
the registrar.
contact_notices (MutableSequence[google.cloud.domains_v1.types.ContactNotice]):
The list of contact notices that you acknowledge. The
notices needed here depend on the values specified in
``registration.contact_settings``.
yearly_price (google.type.money_pb2.Money):
Required. Acknowledgement of the price to transfer or renew
the domain for one year. Call ``RetrieveTransferParameters``
to obtain the price, which you must acknowledge.
authorization_code (google.cloud.domains_v1.types.AuthorizationCode):
The domain's transfer authorization code. You
can obtain this from the domain's current
registrar.
validate_only (bool):
Validate the request without actually
transferring the domain.
"""
parent: str = proto.Field(
proto.STRING,
number=1,
)
registration: "Registration" = proto.Field(
proto.MESSAGE,
number=2,
message="Registration",
)
contact_notices: MutableSequence["ContactNotice"] = proto.RepeatedField(
proto.ENUM,
number=3,
enum="ContactNotice",
)
yearly_price: money_pb2.Money = proto.Field(
proto.MESSAGE,
number=4,
message=money_pb2.Money,
)
authorization_code: "AuthorizationCode" = proto.Field(
proto.MESSAGE,
number=5,
message="AuthorizationCode",
)
validate_only: bool = proto.Field(
proto.BOOL,
number=6,
)
class ListRegistrationsRequest(proto.Message):
r"""Request for the ``ListRegistrations`` method.
Attributes:
parent (str):
Required. The project and location from which to list
``Registration``\ s, specified in the format
``projects/*/locations/*``.
page_size (int):
Maximum number of results to return.
page_token (str):
When set to the ``next_page_token`` from a prior response,
provides the next page of results.
filter (str):
Filter expression to restrict the ``Registration``\ s
returned.
The expression must specify the field name, a comparison
operator, and the value that you want to use for filtering.
The value must be a string, a number, a boolean, or an enum
value. The comparison operator should be one of =, !=, >, <,
>=, <=, or : for prefix or wildcard matches.
For example, to filter to a specific domain name, use an
expression like ``domainName="example.com"``. You can also
check for the existence of a field; for example, to find
domains using custom DNS settings, use an expression like
``dnsSettings.customDns:*``.
You can also create compound filters by combining
expressions with the ``AND`` and ``OR`` operators. For
example, to find domains that are suspended or have specific
issues flagged, use an expression like
``(state=SUSPENDED) OR (issue:*)``.
"""
parent: str = proto.Field(
proto.STRING,
number=1,
)
page_size: int = proto.Field(
proto.INT32,
number=2,
)
page_token: str = proto.Field(
proto.STRING,
number=3,
)
filter: str = proto.Field(
proto.STRING,
number=4,
)
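# Illustrative sketch: a ListRegistrationsRequest using the compound filter
# syntax described in the docstring above.
#
#   request = ListRegistrationsRequest(
#       parent="projects/my-project/locations/global",
#       filter="(state=SUSPENDED) OR (issue:*)",
#       page_size=50,
#   )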
class ListRegistrationsResponse(proto.Message):
r"""Response for the ``ListRegistrations`` method.
Attributes:
registrations (MutableSequence[google.cloud.domains_v1.types.Registration]):
A list of ``Registration``\ s.
next_page_token (str):
When present, there are more results to retrieve. Set
``page_token`` to this value on a subsequent call to get the
next page of results.
"""
@property
def raw_page(self):
return self
registrations: MutableSequence["Registration"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="Registration",
)
next_page_token: str = proto.Field(
proto.STRING,
number=2,
)
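# Illustrative pagination sketch (client assumed): next_page_token from one
# response becomes page_token on the next request until it comes back empty.
# The generated client also returns an auto-paginating iterator; the manual
# loop below just illustrates the token handshake.
#
#   page_token = ""
#   while True:
#       resp = client.list_registrations(
#           request={"parent": parent, "page_token": page_token}
#       )
#       for reg in resp.registrations:
#           print(reg.domain_name)
#       if not resp.next_page_token:
#           break
#       page_token = resp.next_page_token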
class GetRegistrationRequest(proto.Message):
r"""Request for the ``GetRegistration`` method.
Attributes:
name (str):
Required. The name of the ``Registration`` to get, in the
format ``projects/*/locations/*/registrations/*``.
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
class UpdateRegistrationRequest(proto.Message):
r"""Request for the ``UpdateRegistration`` method.
Attributes:
registration (google.cloud.domains_v1.types.Registration):
Fields of the ``Registration`` to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The field mask describing which fields to update
as a comma-separated list. For example, if only the labels
are being updated, the ``update_mask`` is ``"labels"``.
"""
registration: "Registration" = proto.Field(
proto.MESSAGE,
number=1,
message="Registration",
)
update_mask: field_mask_pb2.FieldMask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
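# Illustrative sketch: building the update_mask with google.protobuf. For a
# labels-only update, as described above:
#
#   from google.protobuf import field_mask_pb2
#   request = UpdateRegistrationRequest(
#       registration=registration,
#       update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
#   )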
class ConfigureManagementSettingsRequest(proto.Message):
r"""Request for the ``ConfigureManagementSettings`` method.
Attributes:
registration (str):
Required. The name of the ``Registration`` whose management
settings are being updated, in the format
``projects/*/locations/*/registrations/*``.
management_settings (google.cloud.domains_v1.types.ManagementSettings):
Fields of the ``ManagementSettings`` to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The field mask describing which fields to update
as a comma-separated list. For example, if only the transfer
lock is being updated, the ``update_mask`` is
``"transfer_lock_state"``.
"""
registration: str = proto.Field(
proto.STRING,
number=1,
)
management_settings: "ManagementSettings" = proto.Field(
proto.MESSAGE,
number=2,
message="ManagementSettings",
)
update_mask: field_mask_pb2.FieldMask = proto.Field(
proto.MESSAGE,
number=3,
message=field_mask_pb2.FieldMask,
)
class ConfigureDnsSettingsRequest(proto.Message):
r"""Request for the ``ConfigureDnsSettings`` method.
Attributes:
registration (str):
Required. The name of the ``Registration`` whose DNS
settings are being updated, in the format
``projects/*/locations/*/registrations/*``.
dns_settings (google.cloud.domains_v1.types.DnsSettings):
Fields of the ``DnsSettings`` to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The field mask describing which fields to update
as a comma-separated list. For example, if only the name
servers are being updated for an existing Custom DNS
configuration, the ``update_mask`` is
``"custom_dns.name_servers"``.
When changing the DNS provider from one type to another,
pass the new provider's field name as part of the field
mask. For example, when changing from a Google Domains DNS
configuration to a Custom DNS configuration, the
``update_mask`` is ``"custom_dns"``.
validate_only (bool):
Validate the request without actually
updating the DNS settings.
"""
registration: str = proto.Field(
proto.STRING,
number=1,
)
dns_settings: "DnsSettings" = proto.Field(
proto.MESSAGE,
number=2,
message="DnsSettings",
)
update_mask: field_mask_pb2.FieldMask = proto.Field(
proto.MESSAGE,
number=3,
message=field_mask_pb2.FieldMask,
)
validate_only: bool = proto.Field(
proto.BOOL,
number=4,
)
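# Illustrative sketch of the provider-switch rule above: to move from Google
# Domains DNS to a Custom DNS configuration, name the new provider's field in
# the mask rather than its sub-fields. registration_name and new_custom_dns
# are assumed to exist.
#
#   request = ConfigureDnsSettingsRequest(
#       registration=registration_name,
#       dns_settings=DnsSettings(custom_dns=new_custom_dns),
#       update_mask=field_mask_pb2.FieldMask(paths=["custom_dns"]),
#   )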
class ConfigureContactSettingsRequest(proto.Message):
r"""Request for the ``ConfigureContactSettings`` method.
Attributes:
registration (str):
Required. The name of the ``Registration`` whose contact
settings are being updated, in the format
``projects/*/locations/*/registrations/*``.
contact_settings (google.cloud.domains_v1.types.ContactSettings):
Fields of the ``ContactSettings`` to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The field mask describing which fields to update
as a comma-separated list. For example, if only the
registrant contact is being updated, the ``update_mask`` is
``"registrant_contact"``.
contact_notices (MutableSequence[google.cloud.domains_v1.types.ContactNotice]):
The list of contact notices that the caller acknowledges.
The notices needed here depend on the values specified in
``contact_settings``.
validate_only (bool):
Validate the request without actually
updating the contact settings.
"""
registration: str = proto.Field(
proto.STRING,
number=1,
)
contact_settings: "ContactSettings" = proto.Field(
proto.MESSAGE,
number=2,
message="ContactSettings",
)
update_mask: field_mask_pb2.FieldMask = proto.Field(
proto.MESSAGE,
number=3,
message=field_mask_pb2.FieldMask,
)
contact_notices: MutableSequence["ContactNotice"] = proto.RepeatedField(
proto.ENUM,
number=4,
enum="ContactNotice",
)
validate_only: bool = proto.Field(
proto.BOOL,
number=5,
)
class ExportRegistrationRequest(proto.Message):
r"""Request for the ``ExportRegistration`` method.
Attributes:
name (str):
Required. The name of the ``Registration`` to export, in the
format ``projects/*/locations/*/registrations/*``.
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
class DeleteRegistrationRequest(proto.Message):
r"""Request for the ``DeleteRegistration`` method.
Attributes:
name (str):
Required. The name of the ``Registration`` to delete, in the
format ``projects/*/locations/*/registrations/*``.
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
class RetrieveAuthorizationCodeRequest(proto.Message):
r"""Request for the ``RetrieveAuthorizationCode`` method.
Attributes:
registration (str):
Required. The name of the ``Registration`` whose
authorization code is being retrieved, in the format
``projects/*/locations/*/registrations/*``.
"""
registration: str = proto.Field(
proto.STRING,
number=1,
)
class ResetAuthorizationCodeRequest(proto.Message):
r"""Request for the ``ResetAuthorizationCode`` method.
Attributes:
registration (str):
Required. The name of the ``Registration`` whose
authorization code is being reset, in the format
``projects/*/locations/*/registrations/*``.
"""
registration: str = proto.Field(
proto.STRING,
number=1,
)
class RegisterParameters(proto.Message):
r"""Parameters required to register a new domain.
Attributes:
domain_name (str):
The domain name. Unicode domain names are
expressed in Punycode format.
availability (google.cloud.domains_v1.types.RegisterParameters.Availability):
Indicates whether the domain is available for registration.
This value is accurate when obtained by calling
``RetrieveRegisterParameters``, but is approximate when
obtained by calling ``SearchDomains``.
supported_privacy (MutableSequence[google.cloud.domains_v1.types.ContactPrivacy]):
Contact privacy options that the domain
supports.
domain_notices (MutableSequence[google.cloud.domains_v1.types.DomainNotice]):
Notices about special properties of the
domain.
yearly_price (google.type.money_pb2.Money):
Price to register or renew the domain for one
year.
"""
class Availability(proto.Enum):
r"""Possible availability states of a domain name.
Values:
AVAILABILITY_UNSPECIFIED (0):
The availability is unspecified.
AVAILABLE (1):
The domain is available for registration.
UNAVAILABLE (2):
The domain is not available for registration.
Generally this means it is already registered to
another party.
UNSUPPORTED (3):
The domain is not currently supported by
Cloud Domains, but may be available elsewhere.
UNKNOWN (4):
Cloud Domains is unable to determine domain
availability, generally due to system
maintenance at the domain name registry.
"""
AVAILABILITY_UNSPECIFIED = 0
AVAILABLE = 1
UNAVAILABLE = 2
UNSUPPORTED = 3
UNKNOWN = 4
domain_name: str = proto.Field(
proto.STRING,
number=1,
)
availability: Availability = proto.Field(
proto.ENUM,
number=2,
enum=Availability,
)
supported_privacy: MutableSequence["ContactPrivacy"] = proto.RepeatedField(
proto.ENUM,
number=3,
enum="ContactPrivacy",
)
domain_notices: MutableSequence["DomainNotice"] = proto.RepeatedField(
proto.ENUM,
number=4,
enum="DomainNotice",
)
yearly_price: money_pb2.Money = proto.Field(
proto.MESSAGE,
number=5,
message=money_pb2.Money,
)
class TransferParameters(proto.Message):
r"""Parameters required to transfer a domain from another
registrar.
Attributes:
domain_name (str):
The domain name. Unicode domain names are
expressed in Punycode format.
current_registrar (str):
The registrar that currently manages the
domain.
name_servers (MutableSequence[str]):
The name servers that currently store the
configuration of the domain.
transfer_lock_state (google.cloud.domains_v1.types.TransferLockState):
Indicates whether the domain is protected by a transfer
lock. For a transfer to succeed, this must show
``UNLOCKED``. To unlock a domain, go to its current
registrar.
supported_privacy (MutableSequence[google.cloud.domains_v1.types.ContactPrivacy]):
Contact privacy options that the domain
supports.
yearly_price (google.type.money_pb2.Money):
Price to transfer or renew the domain for one
year.
"""
domain_name: str = proto.Field(
proto.STRING,
number=1,
)
current_registrar: str = proto.Field(
proto.STRING,
number=2,
)
name_servers: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=3,
)
transfer_lock_state: "TransferLockState" = proto.Field(
proto.ENUM,
number=4,
enum="TransferLockState",
)
supported_privacy: MutableSequence["ContactPrivacy"] = proto.RepeatedField(
proto.ENUM,
number=5,
enum="ContactPrivacy",
)
yearly_price: money_pb2.Money = proto.Field(
proto.MESSAGE,
number=6,
message=money_pb2.Money,
)
class AuthorizationCode(proto.Message):
r"""Defines an authorization code.
Attributes:
code (str):
The Authorization Code in ASCII. It can be
used to transfer the domain to or from another
registrar.
"""
code: str = proto.Field(
proto.STRING,
number=1,
)
class OperationMetadata(proto.Message):
r"""Represents the metadata of the long-running operation. Output
only.
Attributes:
create_time (google.protobuf.timestamp_pb2.Timestamp):
The time the operation was created.
end_time (google.protobuf.timestamp_pb2.Timestamp):
The time the operation finished running.
target (str):
Server-defined resource path for the target
of the operation.
verb (str):
Name of the verb executed by the operation.
status_detail (str):
Human-readable status of the operation, if
any.
api_version (str):
API version used to start the operation.
"""
create_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
end_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
)
target: str = proto.Field(
proto.STRING,
number=3,
)
verb: str = proto.Field(
proto.STRING,
number=4,
)
status_detail: str = proto.Field(
proto.STRING,
number=5,
)
api_version: str = proto.Field(
proto.STRING,
number=6,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| null |
packages/google-cloud-domains/google/cloud/domains_v1/types/domains.py
|
domains.py
|
py
| 53,578 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "proto.module",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "proto.Enum",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "proto.Enum",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "proto.Enum",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "proto.Enum",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "proto.Enum",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "proto.Enum",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2.Timestamp",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2.Timestamp",
"line_number": 303,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "google.protobuf.timestamp_pb2.Timestamp",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2.Timestamp",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableMapping",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "proto.MapField",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "proto.STRING",
"line_number": 322,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 331,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 345,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 346,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "proto.Enum",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 389,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 400,
"usage_type": "attribute"
},
{
"api_name": "proto.Enum",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 448,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 464,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 465,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 468,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 469,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 474,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 499,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 500,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 504,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 508,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 509,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 514,
"usage_type": "attribute"
},
{
"api_name": "proto.Enum",
"line_number": 534,
"usage_type": "attribute"
},
{
"api_name": "proto.Enum",
"line_number": 603,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 625,
"usage_type": "call"
},
{
"api_name": "proto.INT32",
"line_number": 626,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 629,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 630,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 634,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 635,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 640,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 644,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 666,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 667,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 670,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 670,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 671,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 674,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 674,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 675,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 680,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 685,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 686,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 691,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 691,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 692,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 698,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 727,
"usage_type": "attribute"
},
{
"api_name": "google.type.postal_address_pb2.PostalAddress",
"line_number": 743,
"usage_type": "attribute"
},
{
"api_name": "google.type.postal_address_pb2",
"line_number": 743,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 743,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 744,
"usage_type": "attribute"
},
{
"api_name": "google.type.postal_address_pb2.PostalAddress",
"line_number": 746,
"usage_type": "attribute"
},
{
"api_name": "google.type.postal_address_pb2",
"line_number": 746,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 748,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 749,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 752,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 753,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 757,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 761,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 762,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 766,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 767,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 771,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 772,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 776,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 777,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 783,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 795,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 796,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 799,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 800,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 805,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 813,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 813,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 814,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 820,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 832,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 833,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 836,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 837,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 842,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 851,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 852,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 858,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 887,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 888,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 891,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 892,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 896,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 896,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 897,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 901,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 901,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 902,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2.Money",
"line_number": 906,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2",
"line_number": 906,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 906,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 907,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2.Money",
"line_number": 909,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2",
"line_number": 909,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 911,
"usage_type": "call"
},
{
"api_name": "proto.BOOL",
"line_number": 912,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 917,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 929,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 930,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 933,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 934,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 939,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 948,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 949,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 955,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 989,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 990,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 993,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 994,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 998,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 998,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 999,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2.Money",
"line_number": 1003,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2",
"line_number": 1003,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1003,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1004,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2.Money",
"line_number": 1006,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2",
"line_number": 1006,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1008,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1009,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1013,
"usage_type": "call"
},
{
"api_name": "proto.BOOL",
"line_number": 1014,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 1019,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1055,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1056,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1059,
"usage_type": "call"
},
{
"api_name": "proto.INT32",
"line_number": 1060,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1063,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1064,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1067,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1068,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 1073,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 1089,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 1089,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1090,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1094,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1095,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 1100,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1109,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1110,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 1115,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1127,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1128,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2.FieldMask",
"line_number": 1132,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2",
"line_number": 1132,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1132,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1133,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2.FieldMask",
"line_number": 1135,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2",
"line_number": 1135,
"usage_type": "name"
},
{
"api_name": "proto.Message",
"line_number": 1139,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1156,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1157,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1160,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1161,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2.FieldMask",
"line_number": 1165,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2",
"line_number": 1165,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1165,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1166,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2.FieldMask",
"line_number": 1168,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2",
"line_number": 1168,
"usage_type": "name"
},
{
"api_name": "proto.Message",
"line_number": 1172,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1199,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1200,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1203,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1204,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2.FieldMask",
"line_number": 1208,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2",
"line_number": 1208,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1208,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1209,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2.FieldMask",
"line_number": 1211,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2",
"line_number": 1211,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1213,
"usage_type": "call"
},
{
"api_name": "proto.BOOL",
"line_number": 1214,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 1219,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1243,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1244,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1247,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1248,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2.FieldMask",
"line_number": 1252,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2",
"line_number": 1252,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1252,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1253,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2.FieldMask",
"line_number": 1255,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.field_mask_pb2",
"line_number": 1255,
"usage_type": "name"
},
{
"api_name": "typing.MutableSequence",
"line_number": 1257,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 1257,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 1258,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1262,
"usage_type": "call"
},
{
"api_name": "proto.BOOL",
"line_number": 1263,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 1268,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1277,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1278,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 1283,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1292,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1293,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 1298,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1308,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1309,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 1314,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1324,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1325,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 1330,
"usage_type": "attribute"
},
{
"api_name": "proto.Enum",
"line_number": 1353,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1379,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1380,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1383,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 1384,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 1388,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 1388,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 1389,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 1393,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 1393,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 1394,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2.Money",
"line_number": 1398,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2",
"line_number": 1398,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1398,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1399,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2.Money",
"line_number": 1401,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2",
"line_number": 1401,
"usage_type": "name"
},
{
"api_name": "proto.Message",
"line_number": 1405,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1432,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1433,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1436,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1437,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 1440,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 1440,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1441,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1444,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 1445,
"usage_type": "attribute"
},
{
"api_name": "typing.MutableSequence",
"line_number": 1449,
"usage_type": "name"
},
{
"api_name": "proto.RepeatedField",
"line_number": 1449,
"usage_type": "call"
},
{
"api_name": "proto.ENUM",
"line_number": 1450,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2.Money",
"line_number": 1454,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2",
"line_number": 1454,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1454,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1455,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2.Money",
"line_number": 1457,
"usage_type": "attribute"
},
{
"api_name": "google.type.money_pb2",
"line_number": 1457,
"usage_type": "name"
},
{
"api_name": "proto.Message",
"line_number": 1461,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1471,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1472,
"usage_type": "attribute"
},
{
"api_name": "proto.Message",
"line_number": 1477,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2.Timestamp",
"line_number": 1498,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2",
"line_number": 1498,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1498,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1499,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2.Timestamp",
"line_number": 1501,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2",
"line_number": 1501,
"usage_type": "name"
},
{
"api_name": "google.protobuf.timestamp_pb2.Timestamp",
"line_number": 1503,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2",
"line_number": 1503,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1503,
"usage_type": "call"
},
{
"api_name": "proto.MESSAGE",
"line_number": 1504,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2.Timestamp",
"line_number": 1506,
"usage_type": "attribute"
},
{
"api_name": "google.protobuf.timestamp_pb2",
"line_number": 1506,
"usage_type": "name"
},
{
"api_name": "proto.Field",
"line_number": 1508,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1509,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1512,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1513,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1516,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1517,
"usage_type": "attribute"
},
{
"api_name": "proto.Field",
"line_number": 1520,
"usage_type": "call"
},
{
"api_name": "proto.STRING",
"line_number": 1521,
"usage_type": "attribute"
}
] |
240641241
|
import logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
ss="This is a test sentence."
sss="The same to last one."
logger.info("111 %s" % ss)
logger.info("111")
| null |
examples/ttt.py
|
ttt.py
|
py
| 339 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.basicConfig",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
}
] |
490664792
|
from circuitbreaker import CircuitBreaker
from cache_driver import CacheDriver
import logging
from logstash_async.handler import AsynchronousLogstashHandler
from time import sleep
from sanic import response
# Setup elk stack
# host_logger = 'localhost'
host_logger = 'logstash'
# host_logger = 'elk'
port_logger = 5000
# Get you a test logger
test_logger = logging.getLogger('python-logstash-logger')
# Set it to whatever level you want - default will be info
test_logger.setLevel(logging.DEBUG)
# Create a handler for it
async_handler = AsynchronousLogstashHandler(host_logger, port_logger, database_path=None)
# Add the handler to the logger
test_logger.addHandler(async_handler)
###### Define possible cache statuses#####
SUCCESS = 1
CUSTOM_CACHE_FAILED = 2
REDIS_CACHE_FAILED = 3
BOTH_CACHES_FAILED = 4
##########################################
class LoadBalancer:
def any_available(self, service_type):
""" returns True if any service of respective type is available or False if not"""
cache_status = SUCCESS
len_services1 = 0
len_services2 = 0
cache = CacheDriver()
try:
len_services1 = cache.do("custom", 'llen', ["services-" + str(service_type)])
except Exception as e:
test_logger.error("ERROR: Custom cache llen command failed on key" + "services-" + str(service_type) )
test_logger.error(str(e))
cache_status = CUSTOM_CACHE_FAILED
try:
len_services2 = cache.do("redis", 'llen', ["services-" + str(service_type)])
except Exception as e:
test_logger.error("ERROR: Redis cache llen command failed on key" +"services-" + str(service_type) )
test_logger.error(str(e))
cache_status = REDIS_CACHE_FAILED if cache_status == SUCCESS else BOTH_CACHES_FAILED
if cache_status == BOTH_CACHES_FAILED:
test_logger.error("ERROR: Alert! Both caches failed (redis and custom) on command llen (" + "services-" + str(service_type) + ")")
if len_services1 is None or type(len_services1) is not int:
len_services1 = 0
if len_services2 is None or type(len_services2) is not int:
len_services2 = 0
return max(int(len_services1), int(len_services2)) > 0  # True if any service is registered
def next(self, service_type):
cache_status = SUCCESS
sleep(0.3) #give some time for processing previous request
cache = CacheDriver()
service1 = None
service2 = None
try:
service1 = cache.do("custom", 'rpoplpush', ["services-"+str(service_type), "services-"+str(service_type)])
except Exception as e:
test_logger.error("ERROR: Custom cache rpoplpush command failed on" + str(["services-"+str(service_type), "services-"+str(service_type)]))
test_logger.error(str(e))
cache_status = CUSTOM_CACHE_FAILED
try:
service2 = cache.do("redis", 'rpoplpush', ["services-"+str(service_type), "services-"+str(service_type)])
if service2 is not None:
service2 = service2.decode('utf-8')
except Exception as e:
test_logger.error("ERROR: Redis cache rpoplpush command failed on" + str(["services-"+str(service_type), "services-"+str(service_type)]))
test_logger.error(str(e))
cache_status = REDIS_CACHE_FAILED if cache_status == SUCCESS else BOTH_CACHES_FAILED
if cache_status == BOTH_CACHES_FAILED:
test_logger.error("ERROR: Alert! Both caches rpoplpush command failed on " + str(["services-"+str(service_type), "services-"+str(service_type)]))
circuitbreaker = None  # remains None when both caches failed to return a service
if service1 is not None:
circuitbreaker = CircuitBreaker(service1, service_type)
elif service2 is not None:
circuitbreaker = CircuitBreaker(service2, service_type)
return circuitbreaker
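# Illustrative usage sketch (assumes services of the given type were previously
# pushed onto the "services-<service_type>" lists in both caches):
#
#   lb = LoadBalancer()
#   if lb.any_available("auth"):
#       breaker = lb.next("auth")  # CircuitBreaker wrapping the selected service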
| null |
gateway2/loadbalancer.py
|
loadbalancer.py
|
py
| 3,930 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "logstash_async.handler.AsynchronousLogstashHandler",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cache_driver.CacheDriver",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "cache_driver.CacheDriver",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "circuitbreaker.CircuitBreaker",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "circuitbreaker.CircuitBreaker",
"line_number": 105,
"usage_type": "call"
}
] |
485164192
|
from __future__ import annotations
import climetlab as cml
import xarray as xr
from climetlab.normalize import normalize_args
from . import DATA, OBSERVATIONS_DATA_VERSION, URL, S2sDataset
from .extra import cf_conventions
from .s2s_mergers import S2sMerger
PATTERN_OBS = "{url}/{data}/{dataset}/{version}/{parameter}-{date}.nc"
PATTERN_RAWOBS = "{url}/{data}/{dataset}/{version}/{parameter}{grid_string}.nc"
GRID_STRING = {
"240x121": "",
"121x240": "",
"720x360": "_720x360",
"360x720": "_720x360",
}
class Observations(S2sDataset):
terms_of_use = (
S2sDataset.terms_of_use
+ "\n"
+ (
" This dataset has been dowloaded from IRIDL. By downloading this data you also agree to "
"the terms and conditions defined at https://iridl.ldeo.columbia.edu."
)
)
class RawObservations(Observations):
valid_parameters = ["t2m", "pr"]
dataset = "observations"
@normalize_args(parameter=valid_parameters)
def __init__(self, parameter=None, grid="240x121", version=OBSERVATIONS_DATA_VERSION):
self.version = version
self.grid_string = GRID_STRING[grid]
if parameter is None:
parameter = self.valid_parameters
parameter = cf_conventions(parameter)
request = dict(
url=URL,
data=DATA,
parameter=parameter,
dataset=self.dataset,
version=self.version,
grid_string=self.grid_string,
)
self.source = cml.load_source("url-pattern", PATTERN_RAWOBS, request, merger="merge()")
def to_xarray(self, like=None):
ds = self.source.to_xarray()
if isinstance(like, xr.Dataset):
from .extra import forecast_like_observations
ds = forecast_like_observations(like, ds)
return ds
class PreprocessedObservations(Observations):
dataset = None
valid_parameters = ["t2m", "tp"]
@normalize_args(date="date-list(%Y%m%d)", parameter=["t2m", "tp"])
def __init__(self, date, parameter=None, version=OBSERVATIONS_DATA_VERSION):
if parameter is None:
parameter = self.valid_parameters
parameter = cf_conventions(parameter)
self.version = version
self.date = date
self.parameter = parameter
sources = []
for p in parameter:
request = self._make_request(p)
sources.append(
cml.load_source("url-pattern", PATTERN_OBS, request, merger=S2sMerger(engine="netcdf4"))
# cml.load_source("url-pattern", PATTERN_OBS, request, merger=S2sMerger(engine="h5netcdf"))
# cml.load_source("url-pattern", PATTERN_OBS, request, merger='concat(concat_dim=time_forecast)')
)
self.source = cml.load_source("multi", sources, merger="merge()")
def _make_request(self, parameter):
request = dict(
url=URL,
data=DATA,
parameter=parameter,
dataset=self.dataset,
date=self.date,
version=self.version,
)
return request
class TrainingOutputReference(PreprocessedObservations):
dataset = "training-output-reference"
class TestOutputReference(PreprocessedObservations):
dataset = "test-output-reference"
HindcastLikeObservations = TrainingOutputReference
ForecastLikeObservations = TestOutputReference
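# Illustrative usage sketch (assumes the remote S2S data is reachable; class
# and parameter names are taken from the definitions above):
#
#   obs = RawObservations(parameter="t2m")
#   ds = obs.to_xarray()
#
#   ref = TrainingOutputReference(date="20200102", parameter="tp")
#   ds = ref.to_xarray()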
| null |
climetlab_s2s_ai_challenge/observations.py
|
observations.py
|
py
| 3,421 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "extra.cf_conventions",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "climetlab.load_source",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "climetlab.normalize.normalize_args",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "xarray.Dataset",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "extra.forecast_like_observations",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "extra.cf_conventions",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "climetlab.load_source",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "s2s_mergers.S2sMerger",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "climetlab.load_source",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "climetlab.normalize.normalize_args",
"line_number": 69,
"usage_type": "call"
}
] |
322465304
|
from pandas import read_csv
import numpy as np
import matplotlib.pyplot as plt
import argparse
WARMUP_SECS = 30
COOLDOWN_SECS = 0
def cleandf(df):
start_time = np.min(df['epoch'])
end_time = np.max(df['epoch'])
new_start_time = start_time + (WARMUP_SECS)
new_end_time = end_time - (COOLDOWN_SECS)
df = df[df['epoch'] > new_start_time]
df = df[df['epoch'] < new_end_time]
df['t'] = (df['epoch'] - new_start_time) / 60.0
df['send'] = df['send'] / 1024.0
df['recv'] = df['recv'] / 1024.0
df['used'] = df['used'] / 1024.0 ** 2
df['free'] = df['free'] / 1024.0 ** 2
return df
def main():
parser = argparse.ArgumentParser()
parser.add_argument("dstat_csv", help="Dstat CSV file")
args = parser.parse_args()
df = cleandf(read_csv(args.dstat_csv))
# CPU related stuff
cpu_usage = df['usr'] + df['sys']
# plt.plot(df['t'], df['usr'], alpha=0.7, label='usr')
# plt.plot(df['t'], df['sys'], alpha=0.7, label='sys')
# plt.plot(df['t'], df['idl'], alpha=0.7, label='idl')
plt.plot(df['t'], cpu_usage, alpha=0.7, label='CPU usage')
plt.xlim(xmin=0.0)
plt.ylim(ymin=0.0)
plt.legend(loc="best", fancybox=True, framealpha=0.5)
# plt.grid()
plt.xlabel('Time (in mins)')
plt.ylabel('Percentage')
plt.show()
# Network related
plt.plot(df['t'], df['send'], alpha=0.7, label='send')
plt.plot(df['t'], df['recv'], alpha=0.7, label='recv')
plt.xlim(xmin=0.0)
plt.ylim(ymin=0.0)
plt.legend(loc="best", fancybox=True, framealpha=0.5)
plt.grid()
plt.xlabel('Time (in mins)')
plt.ylabel('Bytes')
plt.show()
# Memory related
plt.plot(df['t'], df['used'], alpha=0.7, label='used')
plt.plot(df['t'], df['free'], alpha=0.7, label='free')
plt.xlim(xmin=0.0)
plt.ylim(ymin=0.0)
plt.legend(loc="best", fancybox=True, framealpha=0.5)
plt.grid()
plt.xlabel('Time (in mins)')
plt.ylabel('Megabytes')
plt.show()
if __name__ == '__main__':
main()
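# Usage sketch (assumes a dstat CSV containing at least the 'epoch', 'usr',
# 'sys', 'send', 'recv', 'used' and 'free' columns):
#
#   python harware_metrics.py dstat_output.csv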
| null |
MessagingSystemScripts/harware_metrics.py
|
harware_metrics.py
|
py
| 2,066 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.min",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 74,
"usage_type": "name"
}
] |
407529485
|
#!/usr/bin/env python
# -*- encoding: UTF-8 -*-
# @Author : Administrator
# @File : DatabaseUtil.py
# @Version: 1.0.0
# @Time : 2019/8/16 13:02
# @Project: reptile-python
# @Package:
# @Software: PyCharm
import os
import sqlite3
import sys
import pandas
import pymysql
def select(connect, sql):
"""
游标对象执行SQL
:param sql:
:param connect:
:return:
"""
try:
# create a cursor object
cursor = connect.cursor()
# execute the SQL statement
cursor.execute(sql)
# fetchone() would return a single row:
# data = cursor.fetchone()
# fetch all rows
return cursor.fetchall()
except sqlite3.OperationalError as e:
print(sql)
raise e
finally:
# close the cursor
cursor.close()
# close the database connection
connect.close()
def execute_commit(connect, sql):
"""
Execute SQL via a cursor object and commit the transaction.
:param connect:
:param sql:
:return:
"""
try:
# create a cursor object
cursor = connect.cursor()
# execute the SQL statement
cursor.execute(sql)
# commit the transaction
connect.commit()
# an insert could return the auto-increment id:
# return cursor.lastrowid
# or return the number of affected rows:
# return cursor.arraysize
return cursor.rowcount
except sqlite3.OperationalError as e:
print(sql)
raise e
finally:
# close the cursor
cursor.close()
# close the database connection
connect.close()
def is_table_exist(connect, table):
"""
Check whether a table exists.
:param connect: connection
:param table: table name
:return:
"""
sql = f"select name from sqlite_master where type='table' and name='{table}'"
res = select(connect, sql)
if len(res) == 0:
return False
return True
class Mysql:
def __init__(self, host, port, user, password, database, charset="utf8"):
"""
Initialize the MySQL configuration.
:param host: host address
:param port: port
:param user: user
:param password: password
:param database: database
:param charset: character encoding, "utf8" by default
"""
self.host = host
self.port = port
self.user = user
self.password = password
self.db = database
self.charset = charset
def connect(self):
"""
Get a connection.
:return:
"""
return pymysql.connect(
host=self.host,
port=self.port,
user=self.user,
password=self.password,
db=self.db,
charset=self.charset
)
def execute_commit(self, sql):
"""
Insert, update, or delete data.
:param sql:
:return:
"""
execute_commit(self.connect(), sql)
def select(self, sql):
"""
Query data.
:param sql:
:return:
"""
return select(self.connect(), sql)
def is_table_exist(self, table):
"""
Check whether a table exists.
:param table: table name
:return:
"""
return is_table_exist(self.connect(), table)
def pandas_select(self, sql):
"""
Fetch data from the MySQL database with pandas.read_sql.
:param sql:
:return:
"""
try:
# create a connection
conn = self.connect()
return pandas.read_sql(sql, conn)
finally:
# close the database connection
conn.close()
class Sqlite3:
def __init__(self, database, charset="UTF-8"):
"""
Initialize the Sqlite3 configuration.
:param database: database
:param charset: character encoding, "UTF-8" by default
"""
self.db = database
self.charset = charset
self.get_path()
def get_path(self):
if not self.db.endswith('.db'):
self.db = self.db + '.db'
# split into directory and file name
p, f = os.path.split(self.db)
# check whether the directory exists
if p != "" and not os.path.exists(p):
# create the directory if it does not exist
os.mkdir(p)
os.mkdir(p)
# def __enter__(self):
# """
# https://gist.githubusercontent.com/miku/6522074/raw
# get a connection and return a cursor
# :return:
# """
# self.conn = sqlite3.connect(self.db)
# self.cursor = self.conn.cursor()
# return self.cursor
#
# def __exit__(self, exc_class, exc, traceback):
# self.conn.commit()
# self.conn.close()
def connect(self):
"""
Get a connection; the database file is created if it does not exist. Returns a Connection object.
:return:
"""
conn = sqlite3.connect(self.db)
# to insert Chinese characters:
# conn.text_factory = str
return conn
def execute_commit(self, sql):
"""
Insert, update, or delete data.
:param sql:
:return:
"""
return execute_commit(self.connect(), sql)
def select(self, sql):
"""
Query data.
:param sql:
:return:
"""
return select(self.connect(), sql)
def is_table_exist(self, table):
"""
Check whether a table exists.
:param table: table name
:return:
"""
return is_table_exist(self.connect(), table)
| null |
utils/DatabaseUtil.py
|
DatabaseUtil.py
|
py
| 5,462 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlite3.OperationalError",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.OperationalError",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "pymysql.connect",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 199,
"usage_type": "call"
}
] |
620830208
|
# Create your views here.
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from arartekomaps.settings import STATIC_URL
from arartekomaps.locations.models import Location
from arartekomaps.places.models import Place
from arartekomaps.categories.models import Category
from django.utils.translation import ugettext_lazy as _
def split_seq(seq, num_pieces):
""" Split a list in n groups. Useful for listings in cols """
start = 0
for i in xrange(num_pieces):
stop = start + len(seq[i::num_pieces])
yield seq[start:stop]
start = stop
def listing(request, state):
""" Listing cities in an state (probintziak) """
mystate = Location.objects.get(slug=state)
cities = Location.objects.filter(parent=mystate)
city_slices = split_seq(cities, 4)
hidesearch = True
return render_to_response('listing.html', locals(), context_instance=RequestContext(request)
)
def location(request,state,city,maincat='',subcat=''):
""" Default view for a city """
city = get_object_or_404(Location, slug=city)
rootcats = []
subcats = []
places_here = Place.objects.filter(city=city)
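# collect the root categories present in this city and, for the active root, its subcategories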
for mplace in places_here:
thiscat = mplace.category
rootcat = thiscat.get_root()
if rootcat not in rootcats:
rootcats.append(rootcat)
if maincat and maincat == rootcat.slug and thiscat.get_level() > 0 and thiscat not in subcats:
subcats.append(thiscat)
if maincat:
if subcat:
parentcat = get_object_or_404(Category, slug=maincat)
childcat = get_object_or_404(Category, slug=subcat)
pagetitle = _("%(parentcatname)s: %(childcatname)s in %(cityname)s") % {'parentcatname':_('cat_%s' % parentcat.name), 'childcatname':_('cat_%s' % childcat.name), 'cityname':city.name}
places = Place.objects.filter(city=city, category=childcat)[:20]
else:
parentcat = get_object_or_404(Category, slug=maincat)
pagetitle = _("%(parentcatname)s in %(cityname)s") % {'parentcatname':_('cat_%s' % parentcat.name), 'cityname':city.name}
places = Place.objects.filter(city=city, category__in=parentcat.get_descendants(include_self=True))[:20]
else:
places = Place.objects.filter(city=city)[:20]
pagetitle = city.name
return render_to_response('location.html', locals(), context_instance=RequestContext(request)
)
| null |
arartekomaps/locations/views.py
|
views.py
|
py
| 2,537 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "arartekomaps.locations.models.Location.objects.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "arartekomaps.locations.models.Location.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "arartekomaps.locations.models.Location",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "arartekomaps.locations.models.Location.objects.filter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "arartekomaps.locations.models.Location.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "arartekomaps.locations.models.Location",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "arartekomaps.locations.models.Location",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "arartekomaps.places.models.Place.objects.filter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "arartekomaps.places.models.Place.objects",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "arartekomaps.places.models.Place",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "arartekomaps.categories.models.Category",
"line_number": 47,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "arartekomaps.categories.models.Category",
"line_number": 48,
"usage_type": "argument"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "arartekomaps.places.models.Place.objects.filter",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "arartekomaps.places.models.Place.objects",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "arartekomaps.places.models.Place",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "arartekomaps.categories.models.Category",
"line_number": 52,
"usage_type": "argument"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "arartekomaps.places.models.Place.objects.filter",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "arartekomaps.places.models.Place.objects",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "arartekomaps.places.models.Place",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "arartekomaps.places.models.Place.objects.filter",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "arartekomaps.places.models.Place.objects",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "arartekomaps.places.models.Place",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 61,
"usage_type": "call"
}
] |
490515688
|
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ucheshi.settings')
app = Celery('ucheshi')
app.config_from_object('django.conf:settings', namespace='CELERY')
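# beat schedule: run cheshi.tasks.get_joke every 3 seconds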
app.conf.beat_schedule = {
'get_joke_3s': {
'task': 'cheshi.tasks.get_joke',
'schedule': 3.0
}
}
app.autodiscover_tasks()
| null |
Realtime-Broadcasting-API/ucheshi/ucheshi/celery.py
|
celery.py
|
py
| 347 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ.setdefault",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "celery.Celery",
"line_number": 7,
"usage_type": "call"
}
] |
161653145
|
import os
import wave
import random
import tempfile
from subprocess import Popen, DEVNULL
import threading
from concurrent.futures import ThreadPoolExecutor, wait
from typing import List, Text
MIDI_PATH = './clean_midi'
SAVE_PATH = './dataset'
FREQ_TABLE_PATH = './freq_table'
SAMPLES = 100 * 3 # must be multiple of 3
PARALLEL = 8
def find_all_midi(path) -> List[Text]:
midi_path = []
for d in os.listdir(path):
p = os.path.join(path, d)
if not os.path.isdir(p):
continue
for m in os.listdir(p):
if m.startswith('.') or not m.endswith('.mid'):
continue
midi_path.append(os.path.join(p, m))
midi_path.sort()
return midi_path
def midi_to_wav(in_path, out_path, freq_table=None):
cmd = ['timidity', '-Ow', '-o', out_path]
if freq_table:
cmd.extend(['-Z', freq_table])
cmd.append(in_path)
try:
p = Popen(cmd, stdout=DEVNULL, stderr=DEVNULL)
assert p.wait() == 0
except Exception:
def repr_safe(s):
if set(s) & set('"&$ ,'):
return repr(s)
return s
cmdline = ' '.join(repr_safe(i) for i in cmd)
raise RuntimeError(cmdline) from None
def cut_wav(in_path, out_path, begin_sec=None, dura_sec=30.):
with wave.open(in_path, 'rb') as f:
nchannels, sampwidth, framerate, nframes, *_ = f.getparams()
assert sampwidth == 2, (
"doesn't support sample width other than 16-bit, current: %d" %
(sampwidth * 8))
read_frame = int(dura_sec * framerate)
if begin_sec is None:
assert read_frame <= nframes, 'duration out of range'
begin_frame = random.randint(0, nframes - read_frame)
else:
begin_frame = int(begin_sec * framerate)
assert begin_frame + read_frame <= nframes, 'read out of range'
# each frame is stored as (L, R) if it's stereo
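# readframes(begin_frame) just advances the read position to the clip start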
f.readframes(begin_frame)
data = f.readframes(read_frame)
with wave.open(out_path, 'wb') as f:
f.setnchannels(nchannels)
f.setsampwidth(sampwidth)
f.setframerate(framerate)
f.writeframes(data)
completed = 0
completed_lock = threading.Lock()
def task(i, temper):
global completed
def progress():
return '[%5d/%5d]' % (completed, SAMPLES)
noret = {'end': '', 'flush': True}
in_path = midi_path[i]
out_dir = os.path.join(SAVE_PATH, temper)
os.makedirs(out_dir, exist_ok=True)
out_path = os.path.join(out_dir, '%05d.wav' % i)
_, tmp_path = tempfile.mkstemp('.wav')
print('%s > %s\n%s\r' % (in_path, out_path, progress()), **noret)
try:
freq_table = os.path.join(FREQ_TABLE_PATH, '%s.txt' % temper)
midi_to_wav(in_path, tmp_path, freq_table)
cut_wav(tmp_path, out_path)
except Exception as e:
print(e)
raise e from None
finally:
os.remove(tmp_path)
with completed_lock:
completed += 1
print('%s\r' % progress(), **noret)
if __name__ == '__main__':
midi_path = find_all_midi(MIDI_PATH)
print('dataset size:', len(midi_path))
random.seed(0)
sample = random.sample(range(len(midi_path)), SAMPLES)
print('samples size:', len(sample))
tasks = []
with ThreadPoolExecutor(PARALLEL) as t:
for i in range(0, len(sample), 3):
tasks.append(t.submit(task, sample[i], 'equal'))
tasks.append(t.submit(task, sample[i + 1], 'pure'))
tasks.append(t.submit(task, sample[i + 2], 'pytha'))
wait(tasks)
| null |
python/gen_wav.py
|
gen_wav.py
|
py
| 3,594 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.listdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Text",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "subprocess.DEVNULL",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "wave.open",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "wave.open",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "threading.Lock",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "tempfile.mkstemp",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.wait",
"line_number": 119,
"usage_type": "call"
}
] |
108482689
|
#!/usr/bin/env python
# coding: utf-8
"""Export des ingrédients et des processes du builder"""
from bw2data.project import projects
from food.impacts import impacts as impacts_definition
import bw2calc
import bw2data
import functools
import hashlib
import json
import uuid
# Input
PROJECT = "Food"
DBNAME = "Agribalyse 3.1.1"
BIOSPHERE = DBNAME + " biosphere"
ACTIVITIES = "activities.json"
IMPACTS = "../../public/data/impacts.json" # TODO move the impact definition somewhere else and remove base impact
# Output
INGREDIENTS = "../../public/data/food/ingredients.json"
BUILDER = "../../public/data/food/processes.json"
# maximum variation for new impacts compared to old impacts
projects.create_project(PROJECT, activate=True, exist_ok=True)
bw2data.config.p["biosphere_database"] = BIOSPHERE
db = bw2data.Database(DBNAME)
@functools.cache
def search(name):
results = db.search(name)
assert len(results) >= 1, f"'{name}' was not found in Brightway"
return results[0]
def find_id(activity):
# if this is a complex ingredient, the id is the one constructed by ecobalyse
if "ratio" in activity.keys():
return str(
uuid.UUID(
hashlib.md5(
f"{activity['id']}, constructed by Ecobalyse".encode("utf-8")
).hexdigest()
)
)
else:
return search(activity["search"])["Process identifier"]
if __name__ == "__main__":
# backup the previous builder with old impacts
with open(BUILDER) as f:
oldbuilder = json.load(f)
with open(ACTIVITIES, "r") as f:
activities = json.load(f)
print("Creating ingredient list...")
ingredients = [
{
"id": activity["id"],
"name": activity["name"],
"categories": [c for c in activity["categories"] if c != "ingredient"],
"default": find_id(activity),
"default_origin": activity["default_origin"],
"raw_to_cooked_ratio": activity["raw_to_cooked_ratio"],
"density": activity["density"],
"inedible_part": activity["inedible_part"],
"transport_cooling": activity["transport_cooling"],
"visible": activity["visible"],
"complements": activity.get("complements", []),
}
for activity in activities
if activity["category"] == "ingredient"
]
# cleanup unuseful attributes
for ingredient in ingredients:
if (
"animal_product" not in ingredient["categories"]
and "dairy_product" not in ingredient["categories"]
and "animal-welfare" in ingredient.get("complements")
):
del ingredient["complements"]["animal-welfare"]
print("Creating builder process list...")
builder = {
activity["id"]: {
"id": activity["id"],
"name": search(activity["search"])["name"],
"displayName": activity["name"],
"unit": search(activity["search"])["unit"],
"identifier": find_id(activity),
"system_description": search(activity["search"])["System description"],
"category": activity.get("category"),
"comment": list(search(activity["search"]).production())[0]["comment"],
# those are removed at the end:
"search": activity["search"],
"ratio": activity.get("ratio"),
"subingredient_default": activity.get("subingredient_default"),
"subingredient_organic": activity.get("subingredient_organic"),
"impacts": {"bvi": activity.get("bvi", 0)},
}
for activity in activities
}
# remove empty category
for p in builder:
if not builder[p]["category"]:
del builder[p]["category"]
# remove complex ingredient attributes on simple ingredients
for processid in builder.keys():
if not builder[processid]["ratio"]:
del builder[processid]["ratio"]
del builder[processid]["subingredient_default"]
del builder[processid]["subingredient_organic"]
# check that all three attributes are present on complex ingredients
for activity in activities:
if any(
[
key in activity
for key in ("ratio", "subingredient_default", "subingredient_organic")
]
):
assert all(
[
key in activity
for key in (
"ratio",
"subingredient_default",
"subingredient_organic",
)
]
), f"{activity} seems is missing either ratio or subingredient_default or subingredient_organic"
# compute the impacts of base processes
print("Computing impacts:")
for index, (processid, process) in enumerate(
# keep complex ingredients at the end since they depend on subingredient processes
sorted(builder.items(), key=lambda x: "ratio" in x[1])
):
print(
"("
+ (index) * "•"
+ (len(builder) - index) * " "
+ f") {str(index)}/{len(builder)}",
end="\r",
)
lca = bw2calc.LCA({search(process["search"]): 1})
lca.lci()
for key, method in impacts_definition.items():
lca.switch_method(method)
lca.lcia()
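# keep 10 significant digits of each impact score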
process.setdefault("impacts", {})[key] = float("{:.10g}".format(lca.score))
# etf-o = etf-o1 + etf-o2
process["impacts"]["etf-o"] = (
process["impacts"]["etf-o1"] + process["impacts"]["etf-o2"]
)
del process["impacts"]["etf-o1"]
del process["impacts"]["etf-o2"]
# etf = etf1 + etf2
process["impacts"]["etf"] = (
process["impacts"]["etf1"] + process["impacts"]["etf2"]
)
del process["impacts"]["etf1"]
del process["impacts"]["etf2"]
# Now compute an identifier for complex ingredients
# Compute impacts of complex ingredients
# and tweak some attributes
if "ratio" in process:
for impact in process["impacts"]:
# The ratio is the quantity of simple ingredient necessary to produce 1 unit of complex ingredient.
# You need 1.16 kg of wheat (simple) to produce 1 kg of flour (complex) -> ratio = 1.16
# Formula: organic flour impact = conventional flour impact + ratio * (organic wheat impact - conventional wheat impact)
if impact == "bvi" and (
process["impacts"][impact] == 0
or builder[process["subingredient_organic"]]["impacts"][impact] == 0
or builder[process["subingredient_default"]]["impacts"][impact] == 0
):
# skip if the base ingredient or either sub-ingredient has no bvi
continue
process["impacts"][impact] = process["impacts"][impact] + process[
"ratio"
] * (
builder[process["subingredient_organic"]]["impacts"][impact]
- builder[process["subingredient_default"]]["impacts"][impact]
)
process["name"] = f"{processid}, constructed by Ecobalyse"
process["system_description"] = "Ecobalyse"
# remove unneeded attributes
for attribute in (
"search",
"ratio",
"subingredient_default",
"subingredient_organic",
):
if attribute in process:
del process[attribute]
print("Computing corrected impacts...")
with open(IMPACTS, "r") as f:
impacts_ecobalyse = json.load(f)
corrections = {
k: v["correction"] for (k, v) in impacts_ecobalyse.items() if "correction" in v
}
for process in builder.values():
# compute corrected impacts
for impact_to_correct, correction in corrections.items():
corrected_impact = 0
for correction_item in correction: # For each sub-impact and its weighting
sub_impact_name = correction_item["sub-impact"]
if sub_impact_name in process["impacts"]:
sub_impact = process["impacts"].get(sub_impact_name, 1)
corrected_impact += sub_impact * correction_item["weighting"]
del process["impacts"][sub_impact_name]
process["impacts"][impact_to_correct] = corrected_impact
with open(INGREDIENTS, "w") as outfile:
json.dump(ingredients, outfile, indent=2, ensure_ascii=False)
# Add a newline at the end of the file, to avoid creating a diff with editors adding a newline
outfile.write("\n")
print(f"\nExported {len(ingredients)} ingredients to {INGREDIENTS}")
# display impacts that have changed
old = {p["id"]: p["impacts"] for p in oldbuilder}
review = False
changes = []
for p in builder:
for impact in builder[p]["impacts"]:
if old.get(p, {}).get(impact, {}):
percent_change = (
100
* abs(builder[p]["impacts"][impact] - old[p][impact])
/ old[p][impact]
)
if percent_change > 0.1:
changes.append(
{
"trg": impact,
"name": p,
"%diff": percent_change,
"from": old[p][impact],
"to": builder[p]["impacts"][impact],
}
)
review = True
changes.sort(key=lambda c: c["%diff"])
if review:
keys = ("trg", "name", "%diff", "from", "to")
widths = {key: max([len(str(c[key])) for c in changes]) for key in keys}
print("==".join(["=" * widths[key] for key in keys]))
print("Please review the impact changes below")
print("==".join(["=" * widths[key] for key in keys]))
print(" ".join([f"{key.ljust(widths[key])}" for key in keys]))
print("==".join(["=" * widths[key] for key in keys]))
for c in changes:
print(" ".join([f"{str(c[key]).ljust(widths[key])}" for key in keys]))
print("==".join(["=" * widths[key] for key in keys]))
print(" ".join([f"{key.ljust(widths[key])}" for key in keys]))
print("==".join(["=" * widths[key] for key in keys]))
print("Please review the impact changes above")
print("==".join(["=" * widths[key] for key in keys]))
with open(BUILDER, "w") as outfile:
json.dump(list(builder.values()), outfile, indent=2, ensure_ascii=False)
# Add a newline at the end of the file, to avoid creating a diff with editors adding a newline
outfile.write("\n")
print(f"Exported {len(builder)} processes to {BUILDER}")
| null |
data/food/export_builder.py
|
export_builder.py
|
py
| 11,011 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bw2data.project.projects.create_project",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bw2data.project.projects",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "bw2data.config",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "bw2data.Database",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "functools.cache",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "uuid.UUID",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "bw2calc.LCA",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "food.impacts.impacts.items",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "food.impacts.impacts",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 271,
"usage_type": "call"
}
] |
175381617
|
# coding:utf-8
from copy import deepcopy
from typing import List, Dict
class SingerInfoGetter:
""" 获取歌手信息的类 """
def __init__(self, albumInfo_list: list) -> None:
self.albumInfo_list = deepcopy(albumInfo_list) # type:List[dict]
self.singerInfos = self.getSingerInfos(self.albumInfo_list)
@staticmethod
def getSingerInfos(albumInfo_list: list) -> Dict[str, dict]:
""" 获取歌手信息 """
singerInfos = {}
year = '0'
for albumInfo in albumInfo_list:
singer = albumInfo['singer']
genre = albumInfo['genre']
year_ = albumInfo.get('year', '0')
# insert an entry if this singer is not in the dict yet
if singer not in singerInfos:
singerInfos[singer] = {
"singer": singer,
"genre": genre,
"albumInfo_list": [],
"coverPath": f'singer_avatar/{singer}.jpg'
}
singerInfos[singer]["albumInfo_list"].append(albumInfo)
# use the genre of the newest album as the singer's genre
if year_ >= year:
singerInfos[singer]['genre'] = genre
year = year_
# sort the album information
for singerInfo in singerInfos.values():
singerInfo["albumInfo_list"].sort(
key=lambda i: i.get('year', '0'), reverse=True)
return singerInfos
def updateSingerInfos(self, albumInfo_list: list):
""" 更新歌手信息 """
self.albumInfo_list = deepcopy(albumInfo_list) # type:List[dict]
self.singerInfos = self.getSingerInfos(self.albumInfo_list)
| null |
app/common/meta_data_getter/get_singer_info.py
|
get_singer_info.py
|
py
| 1,711 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "copy.deepcopy",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 49,
"usage_type": "call"
}
] |
382450608
|
# -*- coding: utf-8 -*-
"""
@author: Olav Milian Gran
"""
import datetime
import multiprocessing as mp
import sys
from time import perf_counter
import matplotlib.pyplot as plt
import numpy as np
import sympy as sym
from matplotlib.ticker import MaxNLocator
from linear_elasticity_2d_solver import LinearElasticity2DProblem
from linear_elasticity_2d_solver.default_constants import default_tol
from linear_elasticity_2d_solver.helpers import check_and_make_folder
"""for nice representation of plots"""
sym.init_printing()
fontsize = 20
new_params = {'axes.titlesize': fontsize, 'axes.labelsize': fontsize, 'figure.figsize': (12, 7),
'lines.linewidth': 2, 'lines.markersize': 7, 'ytick.labelsize': fontsize, 'figure.titlesize': fontsize,
'xtick.labelsize': fontsize, 'legend.fontsize': fontsize, 'legend.handlelength': 1.5}
plt.rcParams.update(new_params)
# rho_steal = 8e3 # kg/m^3
alpha = 8e3 * 100 * 9.81 * 0.01 # N/m^2
# Example 2: Gravity in 2D
def f(x, y):
return alpha, 0
def clamped_bc(x, y):
return abs(x) <= default_tol
def make_plots(n, save, q=None, do_errors=True):
now1 = datetime.datetime.now().time() # time object
txt = f"start time: {now1}, n={n}, save={save} \n"
newline = "\n"
line = "-" * 20
save_dict = r"reduced_order_plots"
if save:
save_dict = check_and_make_folder(n, save_dict)
# define problem, can not get from saves, here because we want to set n_rom
s = perf_counter()
le2d = LinearElasticity2DProblem.from_functions(n, f, get_dirichlet_edge_func=clamped_bc, print_info=False)
txt += f"Assembled HF system in {perf_counter() - s} s" + newline
sigma2_dict = {}
mean_err_dict = {}
max_err_dict = {}
n_rom_dict = {}
for mode in ("Uniform", "Gauss-Lobatto"):
sigma2_dict[mode] = {}
mean_err_dict[mode] = {}
max_err_dict[mode] = {}
n_rom_dict[mode] = {}
for grid in (5, 11):
txt += line + newline + f"mode={mode}, grid={grid}" + newline
s = perf_counter()
le2d.build_rb_model(grid=grid, mode=mode, print_info=False)
txt += f"Built RB model in {perf_counter() - s} s" + newline
sigma2_dict[mode][grid] = le2d.singular_values_squared_pod
n_rom_dict[mode][grid] = le2d.n_rom
txt += f"Chosen n_rom={le2d.n_rom}, max use is n_rom_max={le2d.n_rom_max}, " \
+ f"grid size is ns_rom={le2d.ns_rom}, Number of node on one axis is n={n}, " \
+ f"Solution matrix rank: {le2d.solution_matrix_rank}" + newline
txt += "Singular values squared:" + newline
txt += str(le2d.singular_values_squared_pod) + newline
if do_errors:
max_err = np.zeros(le2d.n_rom_max)
mean_err = np.zeros(le2d.n_rom_max)
for n_rom in range(1, le2d.n_rom_max + 1):
errs = np.zeros(le2d.ns_rom)
for i, (e_young, nu_poisson) in enumerate(le2d.e_young_nu_poisson_mat):
# print(i, e_young, nu_poisson)
errs[i] = le2d.error_a_rb(e_young, nu_poisson, n_rom=n_rom)
max_err[n_rom - 1] = np.max(errs)
mean_err[n_rom - 1] = np.mean(errs)
mean_err_dict[mode][grid] = mean_err
max_err_dict[mode][grid] = max_err
# make singular values plot
plt.figure("Singular values")
plt.title(f"Singular values, scaled to $\\sigma_1$, $n={n}$")
for grid in (11, 5):
for mode in ("Gauss-Lobatto", "Uniform"):
sigma2_vec = sigma2_dict[mode][grid]
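# keep only non-negative squared singular values (round-off can make them slightly negative)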
arg0 = np.argwhere(sigma2_vec >= 0)
sigma_vec = np.sqrt(sigma2_vec[arg0])
rel_sigma_vec = sigma_vec / sigma_vec[0]
plt.semilogy(np.arange(len(rel_sigma_vec)) + 1, rel_sigma_vec, "D-",
label=f"{mode} ${grid}\\times{grid}$", alpha=.8)
plt.xlabel("$i$")
plt.ylabel("$\\sigma_i$")
plt.grid()
plt.legend()
if save:
plt.savefig(save_dict + f"/singular_values.pdf")
plt.show()
# make relative information plot
plt.figure("Relative information content")
plt.title(f"Relative information content, $I(N)$, $n={n}$")
k = 5
for grid in (11, 5):
for mode in ("Gauss-Lobatto", "Uniform"):
n_rom = n_rom_dict[mode][grid]
sigma2_vec = sigma2_dict[mode][grid]
arg0 = np.argwhere(sigma2_vec >= 0)
i_n = np.cumsum(sigma2_vec[arg0]) / np.sum(sigma2_vec[arg0])
plt.plot(np.arange(len(i_n)) + 1, i_n, "D-", label=f"{mode} ${grid}\\times{grid}$", alpha=.8)
plt.plot(n_rom, i_n[n_rom - 1], "b.", alpha=.7, zorder=k)
k += 1
plt.xlabel("$N$")
plt.ylabel("$I(N)$")
plt.ylim(0.999_6, 1.000_05)
plt.grid()
plt.legend()
if save:
plt.savefig(save_dict + f"/relative_information_content.pdf")
plt.show()
if do_errors:
# make error plots
for grid in (5, 11):
fig, ax = plt.subplots(1, 1, num="Reduced order errors v. $N$" + f"{grid}, $n={n}$", figsize=(12, 7))
fig.suptitle("Reduced order errors v. $N$" + f", $n={n}$")
for mode in ("Gauss-Lobatto", "Uniform"):
mean_err = mean_err_dict[mode][grid]
ax.semilogy(np.arange(len(mean_err)) + 1, mean_err, "D-",
label=f"mean: {mode} ${grid}\\times{grid}$", alpha=.8)
for mode in ("Gauss-Lobatto", "Uniform"):
max_err = max_err_dict[mode][grid]
ax.semilogy(np.arange(len(max_err)) + 1, max_err, "D-",
label=f"max: {mode} ${grid}\\times{grid}$", alpha=.8)
ax.set_xlabel("$N$")
ax.set_ylabel("$\\|\\|u_h(\\mu) - Vu_N(\\mu)\\|\\|_a$")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.grid()
# adjust
ax.legend(loc=9, bbox_to_anchor=(0.5, -0.13), ncol=2)
if save:
plt.savefig(save_dict + f"/reduced_order_errors_grid{grid}.pdf", bbox_inches='tight')
plt.show()
now2 = datetime.datetime.now().time() # time object
txt += f"end time: {now2}, n={n}, save={save} \n"
if q is None:
print(txt)
else:
res = "Process" + str(n), txt, now2
q.put(res)
def listener(q, output_file):
"""listens for messages on the q, writes to file. """
with open(output_file, 'w') as time_code_log:
while True:
m = q.get()
if m == 'kill':
time_code_log.write('killed')
break
time_code_log.write(m[0] + '\n')
time_code_log.write(m[1] + '\n')
time_code_log.write(str(m[2]) + '\n')
time_code_log.flush()
# Example 2: Gravity in 2D
def main():
# took some time!!!! (20: 12 min, 40: 40 min, 80: 2 hours 42 min, total: 3 hours 34 min), without multiprocessing
# took some time!!!! (20: 13 min, 40: 41 min, 80: 2 hours 41 min, total: 2 hours 41 min), with multiprocessing
multi_process = True
do_errors = True
# !!! Set to True to save the plots!!!
save = True
n_vec = [20, 40, 80]
text_n_vec = "_".join(str(n) for n in n_vec)
if do_errors:
extra = ""
else:
extra = "_no_errors"
output_file = "reduced_order_plots/time_log_n" + text_n_vec + extra + ".txt"
if multi_process:
# must use Manager queue here, or will not work
manager = mp.Manager()
q = manager.Queue()
pool = mp.Pool(mp.cpu_count())
# put listener to work first
watcher = pool.apply_async(listener, (q, output_file))
jobs = []
for n in n_vec:
job = pool.apply_async(make_plots, (n, save, q, do_errors))
jobs.append(job)
# collect results from the make_plots through the pool result queue
for job in jobs:
job.get()
# now we are done, kill the listener
q.put('kill')
pool.close()
pool.join()
else:
with open(output_file, "w") as time_code_log:
sys.stdout = time_code_log
for n in n_vec:
make_plots(n, save, do_errors=do_errors)
if __name__ == '__main__':
main()
| null |
code_use_for_report/Example_2_Gravity_in_2D/reduced_order_error_rel_info_singular_values.py
|
reduced_order_error_rel_info_singular_values.py
|
py
| 8,365 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sympy.init_printing",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams.update",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "linear_elasticity_2d_solver.default_constants.default_tol",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "linear_elasticity_2d_solver.helpers.check_and_make_folder",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "linear_elasticity_2d_solver.LinearElasticity2DProblem.from_functions",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "linear_elasticity_2d_solver.LinearElasticity2DProblem",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "numpy.argwhere",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.semilogy",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "numpy.argwhere",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker.MaxNLocator",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.Manager",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 215,
"usage_type": "attribute"
}
] |
387664372
|
import psycopg2
class WorkWithDataBase:
def __init__(self, dbname, user, password, host):
self.dbname = dbname
self.user = user
self.password = password
self.host = host
self.connection, self.cursor = self.get_connection()
# create a connection
def get_connection(self):
connection = psycopg2.connect(dbname=self.dbname, user=self.user, password=self.password, host=self.host)
connection.autocommit = True
cursor = connection.cursor()
return connection, cursor
# create the tables
def create_db(self):
self.cursor.execute("""CREATE TABLE IF NOT EXISTS Course (
id serial PRIMARY KEY NOT NULL,
name VARCHAR(100) NOT NULL);
CREATE TABLE IF NOT EXISTS Student (
id serial PRIMARY KEY NOT NULL,
name VARCHAR(100) NOT NULL,
gpa NUMERIC(10, 2),
birth TIMESTAMP WITH TIME ZONE);
CREATE TABLE IF NOT EXISTS Student_course (
id serial PRIMARY KEY NOT NULL,
student_id INTEGER REFERENCES Student(id) NOT NULL,
course_id INTEGER REFERENCES Course(id) NOT NULL);
""")
print('Tables created')
return
# create a course
def create_course(self, name_course):
self.cursor.execute("""INSERT INTO Course
(name) values (%s);""", (name_course,))
print('\tcourse added to the table')
return
# return the students of the selected course
def get_students(self, course_id):
self.cursor.execute("""SELECT * FROM Student_course WHERE
course_id = (%s);""", (course_id,))
print(self.cursor.fetchall())
return
# enroll a student in a course
def add_students(self, course_id, student_id):
self.cursor.execute("""INSERT INTO Student_course
(course_id, student_id) values (%s, %s)""", (course_id, student_id))
print('\tstudent enrolled')
return
# create a student row in the table from a dict
def add_student(self, student):
self.cursor.execute("""INSERT INTO Student
(name, gpa, birth) values (%(name)s, %(gpa)s, %(birth)s);""", student)
return
# show a student
def get_student(self, student_id):
self.cursor.execute("""SELECT * FROM Student WHERE
id = (%s);""", (student_id,))
print(self.cursor.fetchall())
return
# show the courses
def get_course(self):
print('\tavailable courses')
self.cursor.execute("""SELECT * FROM Course""")
print(self.cursor.fetchall())
return
def create_student_dict():
student_dict = {}
while True:
print('creating student(s): "n" - new student, "e" - exit')
input_command = input('\tenter a command ')
if input_command == 'n':
name_student = input('\tname ')
gpa_student = input('\taverage grade ')
birth_student = input('\tdate of birth ')
student = {'name': name_student,
'gpa': gpa_student,
'birth': birth_student}
student_dict.update(student)
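# note: update() overwrites the same keys, so only the last entered student is kept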
if input_command == 'e':
break
return student_dict
if __name__ == '__main__':
print('\nScript for working with a PostgreSQL database\n'
'To continue, enter the connection details:')
base_name = input('\tdatabase name ')
user = input('\tlogin ')
password = input('\tpassword ')
host = input('\tserver address ')
postgres_db = WorkWithDataBase(base_name, user, password, host)
print('Connected to the database successfully')
while True:
print('\nAvailable commands:\n'
'\t"c" - create the tables\n'
'\t"cc" - create a course\n'
'\t"lc" - list the courses\n'
'\t"g" - show the students of a course\n'
'\t"a" - enroll a student in a course\n'
'\t"s" - create a student\n'
'\t"l" - show a student\n'
'\t"q" - quit')
input_command = input('Enter a command: ')
if input_command == 'c':
postgres_db.create_db()
if input_command == 'cc':
postgres_db.create_course(input('\tenter the course name '))
if input_command == 'lc':
postgres_db.get_course()
if input_command == 'g':
postgres_db.get_students(input('\tenter the course id '))
if input_command == 'a':
postgres_db.add_students(input('\tenter the course id '), input('\tenter the student id '))
if input_command == 's':
postgres_db.add_student(create_student_dict())
if input_command == 'l':
postgres_db.get_student(input('\tenter the student id '))
if input_command == 'q':
print('\tquit')
break
| null |
postgresql.py
|
postgresql.py
|
py
| 5,966 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "psycopg2.connect",
"line_number": 14,
"usage_type": "call"
}
] |
435359009
|
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
from mpl_toolkits.mplot3d import Axes3D
INPUT_SIZE = 3
HIDDEN_SIZE = 64
OUTPUT_SIZE = 3
LR = 0.01
# Create dataset
para = (10.0, 28, 8.0/3.0)
#define lorenz equations
def f(state, t):
x, y, z = state
sigma, rho, beta = para
return sigma * (y - x), x * (rho - z) - y, x * y - beta * z
# time points
t_seq = np.arange(0, 40, 0.01)
# initial values of x, y, z
state0 = [1.0, 1.0, 1.0]
# solve ODE
states = odeint(f, state0, t_seq)
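# one-step-ahead training pairs: inputs are states[t], targets are states[t+1]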
inputs = torch.FloatTensor(states[:-1]).view(1, states.shape[0] - 1, 3)
targets = torch.FloatTensor(states[1:]).view(1, states.shape[0] - 1, 3)
print(states[:10])
# Define the model
class GRUNet(nn.Module):
def __init__(self,):
super(GRUNet, self).__init__()
self.gru = nn.GRU(input_size = INPUT_SIZE,
hidden_size = HIDDEN_SIZE,
num_layers = 1,
#num_layers = 2,
batch_first = True)
"""
self.rnn2 = nn.RNN(input_size = HIDDEN_SIZE,
hidden_size = HIDDEN_SIZE,
num_layers = 1,
batch_first = True)
"""
self.linear = nn.Linear(HIDDEN_SIZE, OUTPUT_SIZE)
def forward(self, x, hidden):
out, hidden = self.gru(x,hidden)
# [1,seq_len, h] => [seq_len, h]
# out, hidden = self.rnn2(out1, hidden1)
out = out.view(-1, HIDDEN_SIZE)
out = self.linear(out) # [seq_len, h] => [seq_len, 1]
out = out.unsqueeze(dim=0) #[seq_len, 1] => [1, seq_len, 1]
return out, hidden
# Train the model
model = GRUNet()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), LR)
hidden = None
inputs = Variable(inputs)
targets = Variable(targets)
for iter in range(1001):
output, hidden = model(inputs, hidden)
hidden = hidden.data
loss = criterion(output, targets)
model.zero_grad()
loss.backward()
optimizer.step()
if iter % 100 == 0:
print("Iteration{} : loss {}".format(iter, loss.item()))
# Give any initial point predict the following points and Visualize the result
predictions = []
input_point = inputs[:, 30, :]
for _ in range(2*inputs.shape[1]):
input_point = input_point.view(1, 1, 3)
(pred, hidden) = model(input_point, hidden)
input_point = pred
predictions.append(pred.detach().numpy().ravel())
inputs = inputs.data.numpy().ravel()
predictions = np.asarray(predictions)
print(predictions)
print(predictions[:,0])
fig = plt.figure()
ax = fig.gca(projection='3d')
print(predictions.shape)
ax.plot(predictions[:, 0], predictions[:, 1], predictions[:, 2])
plt.show()
plt.savefig("64_gru.png")
print(states[:10])
fig1 = plt.figure()
ax = fig1.gca(projection='3d')
ax.plot(states[:, 0], states[:, 1], states[:, 2])
plt.savefig("out_exact.png")
| null |
lorenz_gru.py
|
lorenz_gru.py
|
py
| 3,051 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.switch_backend",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.odeint",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "torch.nn.GRU",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
}
] |
74839682
|
#!/usr/bin/python3
"""
Review objects that will handles all default RestFul API actions
"""
from api.v1.views import app_views
from models import storage
from models.review import Review
from flask import jsonify, abort, request, make_response
from werkzeug.exceptions import BadRequest
@app_views.route('/places/<place_id>/reviews', methods=['GET', 'POST'],
strict_slashes=False)
def reviews_by_place(place_id):
"""
Handle reviews by place
"""
place = storage.get('Place', place_id)
if place is None:
abort(404, 'Not found')
if request.method == 'GET':
reviews_list = [x.to_dict() for x in place.reviews]
return make_response(jsonify(reviews_list), 200)
elif request.method == 'POST':
data = request.get_json(silent=True)
if data is None:
abort(400, 'Not a JSON')
if data.get('user_id') is None:
abort(400, 'Missing user_id')
user = storage.get('User', data.get('user_id'))
if user is None:
abort(404, 'Not found')
if data.get('text') is None:
abort(400, 'Missing text')
new_review = Review(**data)
new_review.place_id = place.id
new_review.user_id = user.id
new_review.save()
return make_response(jsonify(**new_review.to_dict()), 201)
@app_views.route('/reviews/<review_id>', methods=['GET', 'PUT', 'DELETE'],
strict_slashes=False)
def reviews(review_id):
"""
Handle reviews by id
"""
review = storage.get('Review', review_id)
if review is None:
abort(404, 'Not found')
if request.method == 'GET':
return make_response(jsonify(review.to_dict()), 200)
elif request.method == 'PUT':
changes = dict()
try:
changes = request.get_json()
if changes is None:
abort(400, 'Not a JSON')
except BadRequest:
abort(400, 'Not a JSON')
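# attributes that must not be updated through the API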
ignores = ('id', 'user_id', 'place_id', 'created_at', 'updated_at')
for key, val in changes.items():
if key in ignores:
pass
else:
setattr(review, key, val)
review.save()
return make_response(jsonify(**review.to_dict()), 200)
elif request.method == 'DELETE':
storage.delete(review)
storage.save()
return jsonify({}), 200
| null |
api/v1/views/places_reviews.py
|
places_reviews.py
|
py
| 2,407 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "models.storage.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "models.storage.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "models.review.Review",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views.route",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.storage.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "werkzeug.exceptions.BadRequest",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "models.storage.delete",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "models.storage.save",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views.route",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views",
"line_number": 47,
"usage_type": "name"
}
] |
105182208
|
import requests
from django.db import transaction
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.template.loader import render_to_string
from django.urls import reverse
from django.views import generic
from collections import defaultdict
from component_browser.models import PathRequest
from core.db_interaction import save_parameters, get_request_params, get_pipes_params, \
get_pipe_params, get_request_path, save_output
from core.parse_open_api import extract_parameters
from core.pipeline import *
from pipeline.models import Pipe, Pipeline
def edit_pipeline(request, pk):
if request.method == 'POST':
pipeline = Pipeline.objects.get(pk=pk)
pipeline.delete()
return HttpResponseRedirect(reverse('pipeline:index'))
class PipelineView(generic.DetailView):
model = Pipeline
template_name = 'pipeline/pipeline.html'
pk_url_kwarg = "pipeline_pk"
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
sorted_pipe_objs = [pipe for pipe in Pipe.objects.filter(pipe_line=context["pipeline"]).order_by('position')]
        context['pipes'] = get_pipes_params(sorted_pipe_objs)
context['pipe_id'] = context['pipeline'].id
del context['object']
# del context['pipeline']
return context
class OutputDetailView(generic.TemplateView):
template_name = 'pipeline/output.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['pipe'] = Pipe.objects.get(pk=kwargs['pipe_pk'])
return context
def evaluation(request, pk, param_pk):
if request.method == 'GET':
param = Parameter.objects.get(pk=param_pk)
evaluation_html = param.evaluation_short
if request.is_ajax():
html = render_to_string('pipeline/evaluation.html', {'evaluation': evaluation_html})
return HttpResponse(html)
def render_html(request, *args, **kwargs):
pipe = Pipe.objects.get(pk=kwargs['pipe_pk'])
html_obj = pipe.output
return HttpResponse(html_obj['html'])
class InputDetailView(generic.TemplateView):
template_name = 'pipeline/input.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
pipe = Pipe.objects.get(pk=kwargs['pipe_pk'])
context['pipe'] = get_pipe_params(pipe)
return context
class EmptyPipeView(generic.TemplateView):
template_name = 'pipeline/empty_pipeline.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['pipeline'] = Pipeline.objects.get(pk=kwargs['pk'])
return context
class InputOutputDetailView(generic.TemplateView):
template_name = 'pipeline/input_output.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['pipeline'] = Pipeline.objects.get(pk=kwargs['pipeline_pk'])
pipe_data = get_pipe_params(Pipe.objects.get(pk=kwargs['pipe_pk']))
context['pipe'] = pipe_data
return context
def run(request, pipeline_pk, pipe_pk):
# Initialise validation errors. This is appended to by various parts of this function.
validation_errors = defaultdict(list)
tz = request.session.get('django_timezone', '')
if request.method == 'PUT':
try:
request_data = json.loads(request.body)
# Use the pipe id to get the pipeline id.
pipe = Pipe.objects.get(pk=pipe_pk)
pipeline_id = pipeline_pk
# Extract params from form and validate formats.
        # Note: if parameter values were created by an expression, evaluate the
        # expression and save the output as the passed value. The text box only
        # shows part of the evaluated data when there is an evaluation, to
        # reduce data overhead.
source_params = convert_and_eval_params(request_data, pipeline_id, validation_errors)
# Need to save parameters before making a request, as we load from saved parameters when making the request
# rather than from parameters passed to the browser. This ensures consistency between
# what is visualised and what is sent in requests.
save_parameters(pipe, source_params, validation_errors)
# Don't make the request if there were validation errors during conversion and/or saving parameters
if validation_errors:
return JsonResponse({"errors": validation_errors}, status=400)
return make_request(pipe, source_params)
except Exception as e:
# a catch all
# TODO create a snapshot of the processing to help debugging generic errors.
# E.g. the passed parameters, the target url, the request type etc.
return JsonResponse({"errors": {'general':
["Something unexpected went wrong with the penelope server. "
"Please inform the Penelope developers of the following error: {}"
.format(str(e))]}}, status="500")
def make_request(pipe, source_params):
# Get saved parameters
request_data = get_request_params(pipe)
# Get path to send parameters
request_obj = pipe.request
request_url = get_request_path(request_obj)
headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
request = None
try:
if request_obj.type == "POST":
request = requests.post('http://' + request_url, json=request_data, headers=headers)
elif request_obj.type == "GET":
request = requests.get('http://' + request_url, json=request_data, headers=headers)
except Exception as e:
return JsonResponse({"errors": {'general': [
"Could not make request to {}. "
"Perhaps the server is down?".format(e.request.url)]}},
status="500")
return handle_request_response(request, request_url, source_params, pipe)
def handle_request_response(request, request_url, source_params, pipe):
def round_down(num, divisor):
return num - (num % divisor)
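    # e.g. round_down(404, 100) == 400, so any 4xx status falls back to the
    # generic 400 entry in default_error_message below.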
try:
content = json.loads(request.content)
    except ValueError:
        content = None
default_error_message = {
500: "Service at {} had some internal problem processing your request."
"No content was returned so no information is given about what went wrong.",
400: "Service at {} reports that the data it received was wrong but no content was provided."
"No content was returned so no information is given about what went wrong.",
300: "Service at {} reports a 'redirection' error occurred. This could potentially mean many different things. "
"No content was returned so no information is given about what went wrong.",
200: "Service at {} reports success but did not return any content."
"No content was returned so no further information is given. This is reported as an error by Penelope"
" as content is expected by Penelope but the service may have executed appropriately."
}
#####################
# handle no content #
#####################
if not content:
return JsonResponse({"errors":
{'general': [default_error_message[round_down(request.status_code, 100)]
.format(request_url)]}},
status=request.status_code)
############################
# handle errors in content #
############################
if 'errors' in content:
# map value names to ids
# TODO decide what to do if the same parameter name occurs more than once in, say, a nested structure
        param_name_param_id = {param.name: str(param.id) for param in source_params}
mapped_errors = {key if key == 'general' else 'val-' + param_name_param_id[key]: value for key, value in
content['errors'].items()}
content['errors'] = mapped_errors
return JsonResponse(content, status=request.status_code)
##################
# handle success #
##################
# As long as what is returned is json it is saved. This is already taken care of during the json.loads call.
if 'html' in content:
save_output(pipe, content, True)
else:
save_output(pipe, {"output": content}, False)
return JsonResponse({}, status=request.status_code)
def create_pipe(request, pipeline_pk):
if request.method == 'PUT':
with transaction.atomic():
request_data = json.loads(request.body)
pipe_id = request_data['pipe_id'] # use this to decide after which pipe to create new pipe
request_id = request_data['request_id']
pipeline = Pipeline.objects.get(pk=pipeline_pk)
pipeline.local_id_gen += 1
pipeline.save()
            # If pipes already exist, increment the positions of all pipes after this
            # one; the new pipe is inserted just below it, in the position freed by
            # the increment.
if pipe_id != 'empty':
pipe = Pipe.objects.get(pk=pipe_id)
pipe_position = pipe.position
                next_position = pipe_position + 1 # this is the position the new pipe will occupy
# get all pipes at this position and beyond and increment them by 1
                pipes = Pipe.objects.filter(pipe_line=pipeline, position__gte=next_position)
for pipe in pipes:
pipe.position += 1
pipe.save()
else:
                next_position = 0 # start from 0
request_object = PathRequest.objects.get(pk=request_id)
new_pipe = Pipe.objects.create(pipe_line=pipeline,
request=request_object,
output='',
local_id=pipeline.local_id_gen,
                                           position=next_position,
owner=request.user)
new_pipe.save()
param_filter = ['id', 'in', '_state', 'is_expression', 'items']
parameters = extract_parameters(request_object.parameters, param_filter, loads=False)
def create_param_objects(parameters, parent=None):
for param in parameters:
properties = None
if 'properties' in param:
properties = param['properties']
del param['properties']
new_param = Parameter(pipe=new_pipe, info=param, parent=parent)
new_param.save()
if properties:
create_param_objects(properties, new_param)
create_param_objects(parameters)
# json_new_pipe = json.loads(serializers.serialize('json', [new_pipe]))
# json_new_pipe = json_new_pipe[0]
# json_new_pipe['pipe_origin'] = pipe_id
return JsonResponse({"new_pipe": new_pipe.pk, "pipe_origin": pipe_id})
def delete_pipe(request, pipeline_pk, pipe_pk):
if request.method == 'PUT':
with transaction.atomic():
pipe = Pipe.objects.get(pk=pipe_pk)
next_position = pipe.position + 1
pipe.delete()
# need to move back all future pipes by 1
            pipes = Pipe.objects.filter(pipe_line=pipeline_pk, position__gte=next_position)
for pipe in pipes:
pipe.position -= 1
pipe.save()
return JsonResponse({"deleted_pipe": pipe_pk})
def move_up_pipe(request, pipeline_pk, pipe_pk):
if request.method == 'PUT':
with transaction.atomic():
current_pipe = Pipe.objects.get(pk=pipe_pk)
current_position = current_pipe.position
try:
pipe_before_current = Pipe.objects.filter(pipe_line=pipeline_pk) \
.get(position=current_position - 1)
current_pipe.position = pipe_before_current.position
pipe_before_current.position = current_position
current_pipe.save()
pipe_before_current.save()
return JsonResponse({"pipe_origin_id": pipe_pk,
"pipe_swap_id": pipe_before_current.id})
except Exception as e:
return JsonResponse({"message": str(e)})
def move_down_pipe(request, pipeline_pk, pipe_pk):
if request.method == 'PUT':
with transaction.atomic():
current_pipe = Pipe.objects.get(pk=pipe_pk)
current_position = current_pipe.position
try:
pipe_before_current = Pipe.objects.filter(pipe_line=pipeline_pk)\
.get(position=current_position + 1)
current_pipe.position = pipe_before_current.position
pipe_before_current.position = current_position
current_pipe.save()
pipe_before_current.save()
return JsonResponse({"pipe_origin_id": pipe_pk,
"pipe_swap_id": pipe_before_current.id})
except Exception as e:
return JsonResponse({"message": str(e)})
| null |
pipeline/views.py
|
views.py
|
py
| 13,718 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pipeline.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipeline.objects.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipeline.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipeline",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pipeline.models.delete",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pipeline.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.views.generic.DetailView",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipeline",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.filter",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "core.db_interaction.get_pipes_params",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "core.db_interaction.get_pipes_params",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.get",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.template.loader.render_to_string",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects.get",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.get",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "core.db_interaction.get_pipe_params",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipeline.objects.get",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipeline.objects",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipeline",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipeline.objects.get",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipeline.objects",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipeline",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "core.db_interaction.get_pipe_params",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects.get",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects.get",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "core.db_interaction.save_parameters",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "core.db_interaction.get_request_params",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "core.db_interaction.get_request_path",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "core.db_interaction.save_output",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "core.db_interaction.save_output",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "django.db.transaction.atomic",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "pipeline.models",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipeline.objects.get",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipeline.objects",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipeline",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "pipeline.models.local_id_gen",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "pipeline.models.save",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "pipeline.models",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.get",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.filter",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "component_browser.models.PathRequest.objects.get",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "component_browser.models.PathRequest.objects",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "component_browser.models.PathRequest",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.create",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "pipeline.models",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "pipeline.models.local_id_gen",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "core.parse_open_api.extract_parameters",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "django.db.transaction.atomic",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.get",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.filter",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "django.db.transaction.atomic",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.get",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.filter",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "django.db.transaction.atomic",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.get",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "pipeline.models.Pipe.objects.filter",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "pipeline.models.Pipe.objects",
"line_number": 331,
"usage_type": "attribute"
},
{
"api_name": "pipeline.models.Pipe",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 342,
"usage_type": "call"
}
] |
316336639
|
#!/usr/bin/env python
import sys
import argparse
import os
import warnings
import json
import prism
import transformers
import bert_score
from scipy import stats
import matplotlib.pyplot as plt
import pandas as pd
import statistics
import csv
import seaborn as sns
import numpy as np
import datasets
__models__ = ['prism', 'bert_score', 'roberta_ft', 'bleu', 'bleurt', 'fed']
def create_arg_parser():
"""Creates and returns the ArgumentParser object."""
parser = argparse.ArgumentParser(description='Description of your app.')
parser.add_argument('-d','--datadir', type=str, help='Path to data from baseline_preprocess.py.')
parser.add_argument('-o','--outputdir', type=str, help='Path to output to write scores.')
parser.add_argument('-p','--plotdir', type=str, help='Path to output to write plots.')
    parser.add_argument('--ref', type=str, help='Reference to use: ref, context_last, empty, multi_avg, multi_max, multi.')
parser.add_argument('--model', type=str, help='Model to use. Implemented for prism, bert_score, roberta_ft.')
return parser
def get_scores(modelname, datadir, outputdir=None, ref='ref'):
"""
Uses specified model to score examples read from datadir. See README.md for proper data format
Keyword arguments:
model -- model to use
datadir -- string directory to fetch data (tsv)
outputdir -- optional string directory path to save output
    ref -- optional string denoting the reference: 'ref', 'context_last', 'empty', 'multi_avg', 'multi_max', or 'multi'
"""
# check ref argument validity
if ref not in ['ref', 'context_last', 'empty', 'multi_avg', 'multi_max', 'multi']:
raise ValueError("ref must be 'ref' or 'context_last' or 'empty' or 'multi_avg' or 'multi_max' or 'multi'.")
if modelname not in __models__:
raise ValueError("model not listed")
# get scores
if modelname == 'prism':
model = prism.Prism(model_dir=os.environ['MODEL_DIR'], lang='en')
elif modelname == 'bert_score':
pass # no model directory
elif modelname == 'roberta_ft':
pass # no model directory
elif modelname == 'bleu':
model = datasets.load_metric("sacrebleu")
elif modelname == 'bleurt':
model = datasets.load_metric('bleurt', 'bleurt-large-512')
else:
warnings.warn('Model not listed.')
# read in data
data = pd.read_csv(datadir, sep='\t')
# determine model inputs
if ref == 'ref':
ref_list = data['reference_text'].astype(str).to_list()
elif ref == 'context_last':
ref_list = data['prompt_text'].astype(str).to_list()
elif ref == 'empty':
ref_list = [''] * len(data['candidate_text'])
cand_list = data['candidate_text'].astype(str).to_list()
# determine model and calculate scores
score = []
if modelname == 'prism':
if ref == 'multi_avg' or ref == 'multi_max':
# ref
ref_list = data['reference_text'].astype(str).to_list()
ref_score = [model.score([c], [r]) for c, r in zip(cand_list, ref_list)]
# context_last
ref_list = data['prompt_text'].apply(lambda x: str(x).split('\n')[-1]).to_list()
context_score = [model.score([c], [r]) for c, r in zip(cand_list, ref_list)]
# empty
ref_list = [''] * len(data['candidate_text'])
empty_score = [model.score([c], [r]) for c, r in zip(cand_list, ref_list)]
else:
score = [model.score([c], [r]) for c, r in zip(cand_list, ref_list)]
elif modelname == 'bert_score':
p, r, score = bert_score.score(cands=cand_list, refs=ref_list, lang='en', verbose=True)
elif modelname == 'roberta_ft':
p, r, score = bert_score.score(cands=cand_list, refs=ref_list, lang='en', verbose=True, model_type='../Chatbot_evaluation/models/roberta_ft', num_layers=10)
elif modelname == 'bleu':
if ref == 'multi':
# ref
ref_list = data['reference_text'].astype(str).to_list()
# context_last
context_list = data['prompt_text'].apply(lambda x: str(x).split('\n')[-1]).to_list()
bs = [model.compute(predictions=[cand], references=[[ref, ctx]]) for cand, ref, ctx in zip(cand_list, ref_list, context_list)]
else:
bs = [model.compute(predictions=[c], references=[[r]]) for c, r in zip(cand_list, ref_list)]
score = [x['bp'] for x in bs]
elif modelname == 'bleurt':
preds = model.compute(predictions=cand_list, references=ref_list)
score = preds['scores']
# add scores to dataframe
if modelname == 'prism' and (ref == 'multi_avg' or ref == 'multi_max'):
data['ref_score'] = ref_score
data['context_score'] = context_score
data['empty_score'] = empty_score
if ref == 'multi_avg':
data['score'] = data[['ref_score', 'context_score', 'empty_score']].mean(axis=1)
elif ref == 'multi_max':
data['score'] = data[['ref_score', 'context_score', 'empty_score']].max(axis=1)
else:
data['score'] = score
# write scores to output
if outputdir is not None:
data.to_csv(outputdir, sep='\t')
return data
def plot_correlation(scores, plotdir, ref, modelname):
"""
plots correlation between human annotation and evaluation scores
Keyword arguments:
scores -- dataframe of examples and scores
plotdir -- string directory path to save plots
"""
scores.dropna(subset=['score', 'win_ratio'], inplace=True)
evaluation_scores = np.array(scores['score'], dtype=float)
win_ratio = np.array(scores['win_ratio'], dtype=float)
# construct correlation plot
plt.figure()
fig, ax = plt.subplots(1,1, figsize=(12,12))
# compute correlation
slope, intercept, r_value, p_value, std_err = stats.linregress(evaluation_scores, win_ratio)
# plot
ax.plot(evaluation_scores, win_ratio, 'o', label='original data')
ax.plot(evaluation_scores, intercept + slope*evaluation_scores, 'r', label='fitted line')
ax.set_title(label='$R=${0}\n p={1}'.format(str(round(r_value,6)), str(round(p_value,6))))
print('$R=${0}\n p={1}'.format(str(round(r_value,6)), str(round(p_value,6))))
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper right')
fig.text(0.5, 0.00, 'Automatic Evaluation Metric Score', ha='center', fontsize=12)
fig.text(0.02, 0.5, 'Win Ratio', va='center', rotation='vertical', fontsize=12)
fig.tight_layout()
plt.savefig(plotdir)
plt.close()
# for multi ref PRISM, also compare to single ref scores
if modelname == 'prism' and (ref == 'multi_avg' or ref == 'multi_max'):
for col in ['ref_score', 'context_score', 'empty_score']:
# compute correlation
slope, intercept, r_value, p_value, std_err = stats.linregress(evaluation_scores, scores[col])
print(col)
print('$R=${0}\n p={1}'.format(str(round(r_value,6)), str(round(p_value,6))))
def main():
print("Entered main...")
arg_parser = create_arg_parser()
    try:
        options = arg_parser.parse_args()
    except SystemExit:
        print('baseline_scores.py --model <model> --ref <ref> -d <datadir> -o <outputdir> -p <plotdir>')
        raise
    print(options)
print('Getting scores... ')
scores = get_scores(modelname=options.model, datadir=options.datadir, outputdir=options.outputdir, ref=options.ref)
if options.plotdir is not None:
print('Getting correlations...')
        plot_correlation(scores=scores, plotdir=options.plotdir, ref=options.ref, modelname=options.model)
if __name__ == '__main__':
main()
sys.exit(0)
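# Example invocation (paths are placeholders, not files shipped with this script):
#   python chateval_db_scores.py --model prism --ref ref \
#       -d data/examples.tsv -o out/scores.tsv -p out/corr.png
# For the prism model, the MODEL_DIR environment variable must point at a
# downloaded Prism model directory (see get_scores above).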
| null |
chateval_db_scores.py
|
chateval_db_scores.py
|
py
| 7,668 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "prism.Prism",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "datasets.load_metric",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "datasets.load_metric",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "bert_score.score",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "bert_score.score",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "scipy.stats.linregress",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "scipy.stats.linregress",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 180,
"usage_type": "call"
}
] |
200532619
|
import requests
from sys import exit
class Finance:
indicat = [ 'Bid', 'Open', 'Ask', 'Volume','All','Exit' ]
url = "https://finance.yahoo.com/quote/AAPL?p=AAPL"
def __init__(self):
self.ShowScreen(1)
param = input("\nSelect value from the list above: ")
        if param.capitalize() == 'All' or (param.isdigit() and int(param) == 4):
self.getAllParams()
else:
self.getParamValue(param)
print("\nBye..")
exit(0)
def getParamValue(self,param):
        inputType = param.isalpha()
if inputType:
            param = param.capitalize()
            if param not in self.indicat:
                print("You entered a wrong value, please try again")
                exit(1)
else:
param = int(param)
if param < len(self.indicat):
if(param == len(self.indicat)-1):
print("\nBye...")
exit(0)
else:
param = self.indicat[param]
else:
print("you enter Wrong value, Please try again")
exit(2)
try:
response = requests.get(self.url)
htmltext = response.text
splitlist = htmltext.split(param)
result = splitlist[1].split("\">")[2].split("<")[0]
        except Exception:
            print("Error... We will fix it soon")
            exit(3)
print(f"\n{Colors.OKGREEN}Result: {result}{Colors.OKGREEN}")
def getAllParams(self):
for i in range((len(self.indicat)-2)):
self.getParamValue(self.indicat[i])
def ShowScreen(self, showMeThis):
if showMeThis == 1: # welcome Screen
print(f"\n{Colors.HEADER}Welcom to Apple Inc.(AAPL) Finance\n{Colors.HEADER}")
for i in self.indicat:
print(f" [*]\t{i}")
class Colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
##############
Finance()
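# A sample session (the quoted value is assumed; actual output depends on the live page):
#   Welcome to Apple Inc. (AAPL) Finance
#    [*]   Bid
#    ...
#   Select value from the list above: 0
#   Result: 172.45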
| null |
Finance_web_scraping_Exmaple.py
|
Finance_web_scraping_Exmaple.py
|
py
| 2,258 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.exit",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 47,
"usage_type": "call"
}
] |
299817752
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 11 11:26:12 2016
@author: SRINIVAS
"""
from bs4 import BeautifulSoup as bs
import urllib.request
t=[]
we=0
url='https://projecteuler.net/problem='
meep=input('Problem #')
url=url+meep
page = urllib.request.urlopen(url)
soup = bs(page.read(), 'html.parser')
tea=soup.findAll('p')
for elem in tea:
    print(elem.get_text())
| null |
E find.py
|
E find.py
|
py
| 380 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "urllib.request.request.urlopen",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup.get_text",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 18,
"usage_type": "name"
}
] |
551556376
|
import os, pickle
import pandas as pd
import numpy as np
import xml.etree.ElementTree as ET
TEST_ROOT = 'validate_results/py-faster-rcnn2_15000_iters100000test53107_12_10/Main/' # root directory of model test results
ANNOTATIONS_ROOT = 'data/validate_annotations/Annotations/' # root directory of annotations
CLASS_IDS = ['0001', '0002', '0003', '0004', '0005', '0006', '0007', '0008', '0009', '0010',
'0011', '0012', '0013', '0014', '0015', '0016', '0017', '0018', '0019', '0020',
'0021', '0022', '0023', '0024', '0025', '0026', '0027', '0028', '0029', '0030',
'0031', '0032']
def get_classX_test_paths(class_id, test_root = TEST_ROOT):
    # Get the test-result file paths for class X
classX_test_paths, files = [], None
for root, dirs, files in os.walk(test_root):
pass
for file in files:
if file[-8:-4] == class_id:
classX_test_paths.append(file)
f = lambda x: test_root + x
classX_test_paths = list(map(f, classX_test_paths))
return classX_test_paths
def read_classX_test_data(classX_test_paths, class_id):
    # Read and concatenate the test-result data for class X
classX_test_data = pd.DataFrame(columns=['image_id', 'score', 'b0', 'b1', 'b2', 'b3', 'predict_class'])
for classX_test_path in classX_test_paths:
temp = pd.read_csv(classX_test_path, header=None, names=['image_id', 'score', 'b0', 'b1', 'b2', 'b3'], sep=' ')
temp['predict_class'] = class_id
classX_test_data = classX_test_data.append(temp)
classX_test_data = classX_test_data.sort_values('score', ascending=False).reset_index(drop=True)
return classX_test_data
def get_annotation_paths(annotations_root = ANNOTATIONS_ROOT):
    # Get the annotation file paths
files = None
for root, dirs, files in os.walk(annotations_root):
pass
f = lambda x: annotations_root + x
annotation_paths = list(map(f, files))
return annotation_paths
def read_image_annotations(img_id, class_id, annotations_root = ANNOTATIONS_ROOT):
    # Read one image's annotations, keeping only boxes of the given class
annotations_path = annotations_root + img_id + '.xml'
annotations_xml_root = ET.parse(annotations_path)
object_nodes = annotations_xml_root.findall('object')
ref_bboxs = []
for object in object_nodes:
if object.find('name').text == class_id:
temp_bbox = []
bbox_node = object.find('bndbox')
temp_bbox.append(int(bbox_node.find('xmin').text))
temp_bbox.append(int(bbox_node.find('ymin').text))
temp_bbox.append(int(bbox_node.find('xmax').text))
temp_bbox.append(int(bbox_node.find('ymax').text))
ref_bboxs.append(temp_bbox)
return ref_bboxs
def statistic_class_counts(annotations_root = ANNOTATIONS_ROOT):
    # Count the number of instances of each class over the whole validation set
dump_path = 'class_counts.pkl'
if os.path.exists(dump_path):
print('class_counts exist')
with open(dump_path, 'rb') as f:
class_counts = pickle.load(f)
else:
print('class_count not exist')
class_counts = [0] * 32
annotation_paths = get_annotation_paths(annotations_root)
for annotation_path in annotation_paths:
annotations_xml_root = ET.parse(annotation_path)
object_nodes = annotations_xml_root.findall('object')
for object in object_nodes:
class_id = int(object.find('name').text)
class_counts[class_id - 1] += 1
with open(dump_path, 'wb') as f:
pickle.dump(class_counts, f, protocol=4)
return class_counts
def calculate_iou(ref_bbox, test_bbox):
    # Compute the IoU of two bounding boxes
ref_x1 = ref_bbox[0]
ref_y1 = ref_bbox[1]
ref_x2 = ref_bbox[2]
ref_y2 = ref_bbox[3]
ref_width = ref_x2 - ref_x1
ref_height = ref_y2 - ref_y1
ref_area = ref_width * ref_height
test_x1 = test_bbox[0]
test_y1 = test_bbox[1]
test_x2 = test_bbox[2]
test_y2 = test_bbox[3]
test_width = test_x2 - test_x1
test_height = test_y2 - test_y1
test_area = test_width * test_height
start_x = min(ref_x1, test_x1)
end_x= max(ref_x2, test_x2)
start_y = min(ref_y1, test_y1)
end_y = max(ref_y2, test_y2)
inter_width = ref_width + test_width - (end_x - start_x)
inter_height = ref_height + test_height - (end_y - start_y)
inter_area = inter_width * inter_height
if inter_width < 0 or inter_height < 0:
iou = 0
else:
iou = inter_area * 1.0 / (ref_area + test_area - inter_area)
return iou
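# Worked example for calculate_iou: boxes [0, 0, 10, 10] and [5, 5, 15, 15]
# each have area 100 and overlap in a 5x5 square, so
# iou = 25 / (100 + 100 - 25) = 25 / 175 ≈ 0.143.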
def calculate_gt_precision_recall(classX_test_data, annotations_root):
    # Compute ground truth, running precision and cumulative true-positive count for one class
    class_id = classX_test_data.loc[0, 'predict_class']
ground_truth, precision, recall, tp = [], [], [], 0
for row in range(len(classX_test_data)):#row=0
predict_count = row + 1
img_id = classX_test_data.iloc[row, 0]
test_bbox = list(map(int, map(round, list(classX_test_data.iloc[row, 2:6]))))
ref_bboxs = read_image_annotations(img_id, class_id, annotations_root)
if len(ref_bboxs) == 0:
ground_truth.append(0)
else:
IS_TP = False
for ref_bbox in ref_bboxs:
iou = calculate_iou(ref_bbox, test_bbox)
if iou > 0.5:
IS_TP = True
if IS_TP:
ground_truth.append(1)
tp += 1
else:
ground_truth.append(0)
precision.append(tp / predict_count)
recall.append(tp)
classX_test_data['ground_truth'] = ground_truth
classX_test_data['precision'] = precision
classX_test_data['recall'] = recall
return classX_test_data
def calculate_classX_ap(classX_test_data, threshold, class_count, calculated_max_precisions = None, calculated_rows = 0):
    # Compute the AP of class X; for the method see http://blog.sina.com.cn/s/blog_9db078090102whzw.html
if calculated_rows == 0:
calculated_max_precisions = []
predict_count, recall = 0, 0
else:
        predict_count, recall = calculated_rows, classX_test_data.loc[calculated_rows - 1, 'recall']
    class_id = classX_test_data.loc[0, 'predict_class']
classX_count = class_count[int(class_id) - 1]
temp_data = classX_test_data[classX_test_data.score >= threshold]
max_precisions = calculated_max_precisions[:]
for row in range(calculated_rows, len(temp_data)):
predict_count += 1
temp_recall = temp_data.iloc[row, -1]
if recall != temp_recall:
recall = temp_recall
            temp_max_precision = temp_data.loc[row, 'precision']
max_precisions.append(temp_max_precision)
calculated_max_precisions.append(temp_max_precision)
calculated_rows = len(temp_data)
    # Pad max_precisions for the positives that were never detected: classX_count - tp + 1
    # ground-truth positives remain unpredicted, so treat them as predictions with
    # confidence 0, where each additional prediction matches no true label.
tp = recall
for i in range(classX_count - tp + 1):
predict_count += 1
max_precisions.append(tp / predict_count)
ap = np.sum(np.array(max_precisions)) / len(max_precisions)
return ap, calculated_max_precisions, calculated_rows
def get_best_threshold(classX_test_data, min_threshold, max_threshold, class_count):
    # Find the score threshold that maximizes AP for class X
max_ap, best_threshold = 0, 0
threshold_range = [x / 100 for x in range(int(min_threshold * 100), int(max_threshold * 100), 1)]
threshold_range.sort(reverse=True)
calculated_max_precisions, calculated_rows = [], 0
for threshold in threshold_range:
ap, calculated_max_precisions, calculated_rows = \
calculate_classX_ap(classX_test_data, threshold, class_count, calculated_max_precisions, calculated_rows)
if ap > max_ap:
max_ap = ap
best_threshold = threshold
return best_threshold, max_ap
if __name__ == '__main__':
class_count = statistic_class_counts(ANNOTATIONS_ROOT)
best_thresholds, max_aps = [], []
for class_id in CLASS_IDS:
classX_test_paths = get_classX_test_paths(class_id, TEST_ROOT)
        classX_test_data = read_classX_test_data(classX_test_paths, class_id)
classX_test_data = calculate_gt_precision_recall(classX_test_data, ANNOTATIONS_ROOT)
        best_threshold, max_ap = get_best_threshold(classX_test_data, 0.1, 1, class_count) # threshold range set to 0.1 ~ 1
best_thresholds.append(best_threshold)
max_aps.append(max_ap)
print(class_id + ' best_threshold: ' + str(best_threshold) + ' max_ap: ' + str(max_ap))
max_mAP = np.sum(np.array(max_aps)) / len(max_aps)
print('best thresholds: ', best_thresholds)
print('max mAP: ', max_mAP)
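# Toy trace of calculate_classX_ap (numbers assumed for illustration): with
# classX_count = 3 and a ground_truth column [1, 0, 1] over three predictions,
# max_precisions collects 1/1 and 2/3 at the two true positives; the padding
# loop then appends 2/4 and 2/5 for the undetected positive, giving
# ap = mean([1.0, 0.667, 0.5, 0.4]) ≈ 0.642.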
| null |
deadline_code_results/code/calculate_rcnn2_mAP.py
|
calculate_rcnn2_mAP.py
|
py
| 8,778 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.walk",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "pickle.dump",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 206,
"usage_type": "call"
}
] |
300185574
|
# -*- coding: utf-8 -*-
# Author: Ke Wang
# Contact: wangke17[AT]pku.edu.cn
######################################################################################
# Packages
######################################################################################
# Basic Packages
import os
import time
import argparse
import math
import numpy
import torch
import torch.nn as nn
# import matplotlib
# from matplotlib import pyplot as plt
# Import your custom models.
from models.hello_world import HELLO
from data_utils.tools import Logger
# Pre-set
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
######################################################################################
# Hyper-parameters
######################################################################################
parser = argparse.ArgumentParser(description="Here is your model discription.")
# parser.add_argument("--task_name", default=None, type=str/int/float/bool, required=True, choices=[],
# help="The name of the task to train.")
# Environmental parameters
parser.add_argument('--gpu_id', type=str, default='0', help='Specifies the GPU to use.')
parser.add_argument('--if_load_from_checkpoint', type=bool, default=False, help='If load from saved checkpoint.')
# File parameters
parser.add_argument('--checkpoint_name', type=str, default="None", help='Saved checkpoint name.')
# Model parameters
######################################################################################
# End of hyper parameters
######################################################################################
args = parser.parse_args()
# Set logging file
if args.if_load_from_checkpoint:
timestamp = args.checkpoint_name
else:
timestamp = str(int(time.time()))
args.current_save_path = 'outputs/%s/' % timestamp
if not os.path.exists(args.current_save_path):
os.makedirs(args.current_save_path) # Create the output path
args.log_file = args.current_save_path + time.strftime("log_%Y_%m_%d_%H_%M_%S.txt", time.localtime())
args.log = Logger(args.log_file, level="debug")
args.logger = args.log.logger
# set gpu
if torch.cuda.is_available():
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
args.logger.info("You are now using GPU {}".format(args.gpu_id))
else:
args.logger.warning("CUDA is not avaliable, so now in CPU mode!")
# Write your main code here.
if __name__ == '__main__':
HELLO()
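# Example invocation (values are placeholders):
#   python main.py --gpu_id 0
# Caveat: passing '--if_load_from_checkpoint False' still parses as True,
# because argparse applies bool() to the non-empty string 'False'; omit the
# flag to keep the default of False.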
| null |
Basic_framework/main.py
|
main.py
|
py
| 2,397 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "data_utils.tools.Logger",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "models.hello_world.HELLO",
"line_number": 68,
"usage_type": "call"
}
] |
535290133
|
# Simple script to create region file from catalog
import numpy as np
from astropy.io import fits
import sys
wd='/Users/alberto/Downloads/'
input_file = 'bootes_LOFAR_lba_retana-montenegro18.fit'
output = 'bootes_LOFAR_lba_retana-montenegro18.reg'
size = '8'
color='yellow'
# Open the catalog
cat=fits.open(wd+input_file)
ra=cat[1].data['RAJ2000']
dec=cat[1].data['DEJ2000']
cat.close()
w=open(wd+output,'w')
for i in range(len(ra)):
w.write('circle('+str(ra[i])+'d,'+str(dec[i])+'d,'+size+'\") # width=2 color='+color+'\n')
w.close()
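# Example of one emitted region line (coordinates assumed):
#   circle(217.12345d,34.56789d,8") # width=2 color=yellow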
| null |
create_regfile_from_catalog.py
|
create_regfile_from_catalog.py
|
py
| 531 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "astropy.io.fits.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 15,
"usage_type": "name"
}
] |
309860680
|
import numpy as np
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Dense,Input
import matplotlib.pyplot as plt
#process data
(X_train,_),(X_test,Y_test)=mnist.load_data()
X_train=X_train.astype("float32")/255-0.5
X_test=X_test.astype("float32")/255-0.5
X_train=X_train.reshape((X_train.shape[0],-1))
X_test=X_test.reshape((X_test.shape[0],-1))
print(X_train.shape)
print(X_test.shape)
#build model
#the target dimension to compress
encoding_dim=2
#placeholder for input
input_img=Input(shape=(784,))
#encoder layers
encoded=Dense(128,activation="relu")(input_img)
encoded=Dense(64,activation="relu")(encoded)
encoded=Dense(10,activation="relu")(encoded)
encoder_output=Dense(encoding_dim)(encoded)
#decode layers
decoded=Dense(10,activation="relu")(encoder_output)
decoded=Dense(64,activation="relu")(decoded)
decoded=Dense(128,activation="relu")(decoded)
decoded=Dense(784,activation="tanh")(decoded)
#autoencoder model
autoencoder=Model(inputs=input_img,outputs=decoded)
#encoder model
encoder=Model(inputs=input_img,outputs=encoder_output)
#compile model
autoencoder.compile(optimizer="adam",loss="mse")
autoencoder.fit(X_train,X_train,epochs=50,batch_size=256,shuffle=True)
# plotting
encoded_imgs = encoder.predict(X_test)
plt.scatter(encoded_imgs[:, 0], encoded_imgs[:, 1], c=Y_test)
plt.colorbar()
plt.show()
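# A short follow-up sketch: reconstruct a few test digits with the trained
# autoencoder and compare inputs (top row) with reconstructions (bottom row).
decoded_imgs=autoencoder.predict(X_test[:5])
for i in range(5):
    plt.subplot(2,5,i+1)
    plt.imshow(X_test[i].reshape(28,28)+0.5,cmap="gray") # undo the -0.5 shift
    plt.axis("off")
    plt.subplot(2,5,i+6)
    plt.imshow(decoded_imgs[i].reshape(28,28)+0.5,cmap="gray")
    plt.axis("off")
plt.show()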
| null |
Autoencoder.py
|
Autoencoder.py
|
py
| 1,361 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "keras.datasets.mnist.load_data",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keras.datasets.mnist",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "keras.layers.Input",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
}
] |
566560367
|
#! /usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
origin_data = []
processed_data = []
sample_rate = 44100
with open("fft_output.txt", 'r') as f:
origin_data = [float(x) for x in f.readline().split()]
processed_data = [float(x) for x in f.readline().split()]
y1 = np.array(origin_data)
y2 = np.array(processed_data)
x1 = np.array(range(len(y1)))
x1 = x1 * sample_rate / len(y1) / 2
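# Maps each FFT bin index k to its frequency in Hz: with N magnitude bins
# covering 0..Nyquist (sample_rate / 2), bin k sits at k * sample_rate / N / 2.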
fig, (ax1) = plt.subplots(1, 1, sharex=True)
ax1.plot(x1, y1, linewidth=1.0, color='darkmagenta')
ax1.plot(x1, y2, linewidth=1.0, color='darkmagenta')
#ax1.set_xlim(0,len(y1));
fig.tight_layout()
plt.show()
| null |
plot_data.py
|
plot_data.py
|
py
| 625 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
}
] |
275761709
|
import numpy as np
import sys
if '../' not in sys.path:
sys.path.append('../')
from lib.envs.gridworld import GridworldEnv
def policy_eval(policy, env, gamma=1.0, theta=0.00001):
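    # Iterative policy evaluation: sweep every state and apply the Bellman
    # expectation backup
    #     v(s) <- sum_a pi(a|s) * sum_s' P(s'|s,a) * (r + gamma * v(s'))
    # until the largest per-state change drops below theta.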
V = np.zeros(env.nS)
nS = env.nS
P = env.P
while True:
delta = 0
for s in range(env.nS):
v = 0
for a, action_prob in enumerate(policy[s]):
for prob, ns, reward, _ in env.P[s][a]:
v += action_prob*prob*(reward + gamma*V[ns])
delta = max(delta, np.abs(v - V[s]))
V[s] = v
if delta < theta:
break
return np.array(V)
if __name__ == '__main__':
env = GridworldEnv()
random_policy = np.ones([env.nS, env.nA])/env.nA
v = policy_eval(random_policy, env)
print('v: {}'.format(v))
| null |
policy_eval.py
|
policy_eval.py
|
py
| 812 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "lib.envs.gridworld.GridworldEnv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 26,
"usage_type": "call"
}
] |
37423887
|
import sys, os
import argparse
import pandas as pd
from itertools import product
from multiprocessing import Pool
import subprocess
'''
This script joins the bias files by eta-ring interval.
'''
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dof", type=str, help="DOF file", required=True)
parser.add_argument("-i", "--inputdir", type=str, help="Input dir", required=True)
parser.add_argument("-o", "--outputdir", type=str, help="Output dir", required=True)
parser.add_argument("-er","--eta-rings", type=int, nargs="+", help="Etarings: 18 19 20 ", required=False)
parser.add_argument("-fw", "--fixed", action="store_true", help="Fixed weights")
parser.add_argument("-ws", "--weights-signal", nargs='+', type=int, help="Select signal in weights set", required=False)
parser.add_argument("-wp", "--weights-pu", nargs='+', type=int, help="Select PU in weights set", required=False)
args = parser.parse_args()
if not os.path.exists(args.outputdir):
os.makedirs(args.outputdir)
dof = pd.read_csv(args.dof , sep=",")
commands = []
for etaring in args.eta_rings:
if args.fixed:
list_of_files = []
for stripid in dof[abs(dof.eta_ring) == etaring].stripid.unique():
list_of_files.append("{}/bias_stripID{}.root".format(args.inputdir, stripid))
command = "hadd -f {}/bias_rings_etr{}.root ".format(args.outputdir, etaring)
command += " ".join(list_of_files)
commands.append(command)
else:
for s, p in product(args.weights_signal, args.weights_pu):
list_of_files = []
for stripid in dof[abs(dof.eta_ring) == etaring].stripid.unique():
list_of_files.append("{}/bias_stripID{}_PU{}_S{}.root".format(args.inputdir, stripid,p,s ))
command = "hadd -f {}/bias_rings_etr{}_PU{}_S{}.root ".format(args.outputdir, etaring, p, s)
command += " ".join(list_of_files)
commands.append(command)
def run(command):
subprocess.call(command.split(" "))
p = Pool()
print("Executing hadd...")
p.map(run, commands)
print("DONE")
| null |
PileupMC_v2/joinFilesByEtaRing.py
|
joinFilesByEtaRing.py
|
py
| 2,082 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 52,
"usage_type": "call"
}
] |
512214684
|
from unittest.mock import Mock
import IPython.display
from solara.server import app, kernel
def test_shell(no_app_context):
ws1 = Mock()
ws2 = Mock()
kernel1 = kernel.Kernel()
kernel2 = kernel.Kernel()
kernel1.session.websockets.add(ws1)
kernel2.session.websockets.add(ws2)
context1 = app.AppContext(id="1", kernel=kernel1)
context2 = app.AppContext(id="2", kernel=kernel2)
with context1:
IPython.display.display("test1")
assert ws1.send.call_count == 1
assert ws2.send.call_count == 0
with context2:
IPython.display.display("test1")
assert ws1.send.call_count == 1
assert ws2.send.call_count == 1
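# Note (added): the assertions above pin down the routing contract:
# IPython.display.display() inside an AppContext reaches only the websockets
# registered on that context's kernel, so ws2 receives nothing until
# context2 is entered.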
| null |
tests/unit/shell_test.py
|
shell_test.py
|
py
| 691 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.mock.Mock",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "solara.server.kernel.Kernel",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "solara.server.kernel",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "solara.server.kernel.Kernel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "solara.server.kernel",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "solara.server.app.AppContext",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "solara.server.app",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "solara.server.app.AppContext",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "solara.server.app",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "IPython.display.display.display",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "IPython.display.display",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "IPython.display",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "IPython.display.display.display",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "IPython.display.display",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "IPython.display",
"line_number": 23,
"usage_type": "name"
}
] |
361876175
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Vacancy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, verbose_name='created', default=django.utils.timezone.now)),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, verbose_name='modified', default=django.utils.timezone.now)),
('status', model_utils.fields.StatusField(max_length=100, no_check_for_status=True, choices=[('active', 'active'), ('draft', 'draft')], verbose_name='status', default='active')),
('status_changed', model_utils.fields.MonitorField(monitor='status', verbose_name='status changed', default=django.utils.timezone.now)),
('company', models.CharField(max_length=100, verbose_name='Компания')),
('name', models.CharField(max_length=100, verbose_name='Должность')),
('salary', models.CharField(max_length=100, verbose_name='З/П', blank=True)),
('url', models.URLField(unique=True, verbose_name='Внешняя ссылка')),
('published_at', models.DateTimeField(verbose_name='Дата публикации', null=True)),
('description', models.TextField(help_text='Markdown', verbose_name='Текст', blank=True)),
('contacts', models.TextField(help_text='Markdown', verbose_name='Контакты', blank=True)),
('type', models.CharField(max_length=50, choices=[('fulltime', 'Фултайм'), ('contract', 'Контракт')], verbose_name='Занятость', default='fulltime')),
('is_participant', models.BooleanField(verbose_name='На митапе', default=False)),
('is_priority', models.BooleanField(verbose_name='Приоритетная вакансия', default=False)),
],
options={
'ordering': ['-is_priority', '-published_at'],
'verbose_name_plural': 'Вакансии',
'verbose_name': 'Вакансия',
},
bases=(models.Model,),
),
]
| null |
apps/vacancies/migrations/0001_initial.py
|
0001_initial.py
|
py
| 2,510 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "model_utils.fields.fields.AutoCreatedField",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "model_utils.fields.fields",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "model_utils.fields",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.db.utils",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "django.db",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "model_utils.fields.fields.AutoLastModifiedField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "model_utils.fields.fields",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "model_utils.fields",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.utils",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.db",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "model_utils.fields.fields.StatusField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "model_utils.fields.fields",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "model_utils.fields",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "model_utils.fields.fields.MonitorField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "model_utils.fields.fields",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "model_utils.fields",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.utils",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "django.db",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.URLField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 39,
"usage_type": "name"
}
] |
464058095
|
import re
from nltk.tokenize import sent_tokenize
def detect_dependency(text):
"""Attempts to determine if the claim set out in text is dependent - if it is dependency is returned - if claim is deemed independent 0 is returned as dependency """
    p = re.compile(r'(of|to|with|in)?\s(C|c)laims?\s\d+((\sto\s\d+)|(\sor\s(C|c)laim\s\d+))?(,\swherein)?')
located = p.search(text)
if located:
        num = re.compile(r'\d+')
dependency = int(num.search(located.group()).group())
else:
# Also check for "preceding claims" or "previous claims" = claim 1
        pre = re.compile(r'\s(preceding|previous)\s(C|c)laims?(,\swherein)?')
located = pre.search(text)
if located:
dependency = 1
else:
dependency = 0
return dependency
def detect_category(text):
"""Attempts to determine and return a string containing the claim category, initially from a choice of two: (method or process - "method") OR (system/product/apparatus - "system")
param string text: the claim text as a string
"""
    p = re.compile(r'(A|An|The)\s([\w-]+\s)*(method|process)\s(of|for)?')
located = p.search(text)
if located:
return "method"
else:
return "system"
def split_into_features(text):
""" Attempts to split a claim into features.
param string text: the claim text as a string
"""
featurelist = []
startindex = 0
#split_re = r'(.+;\s*(and)?)|(.+,.?(and)?\n)|(.+:\s*)|(.+\.\s*$)'
split_expression = r'(;\s*(and)?)|(,.?(and)?\n)|(:\s*)|(\.\s*$)'
p = re.compile(split_expression)
for match in p.finditer(text):
feature = {}
feature['startindex'] = startindex
endindex = match.end()
feature['endindex'] = endindex
feature['text'] = text[startindex:endindex]
featurelist.append(feature)
startindex = endindex
# Try spliting on ';' or ',' followed by '\n' or ':'
#splitlist = filter(None, re.split(r";|(,.?\n)|:", text))
# This also removes the characters - we want to keep them - back to search method?
return featurelist
def extract_claims(text):
""" Attempts to extract claims as a list from a large text string.
param string text: string containing several claims
"""
sent_list = sent_tokenize(text)
# On a test string this returned a list with the claim number and then the
# claim text as separate items
    claims_list = [" ".join(sent_list[i:i+2]) for i in range(0, len(sent_list), 2)]
return claims_list
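# Minimal usage sketch (added; the claim texts are made-up examples):
if __name__ == '__main__':
    print(detect_dependency("2. The method of claim 1, wherein the widget is blue."))  # expected: 1
    print(detect_category("A method for processing claims."))  # expected: "method"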
| null |
app/mod_nlp/claim_processing.py
|
claim_processing.py
|
py
| 2,560 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "re.compile",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.sent_tokenize",
"line_number": 58,
"usage_type": "call"
}
] |
16827393
|
# coding=utf-8
import os
import sys
import time   # needed by the __main__ loop below
import errno  # needed by killProc's error handling
import psutil
mProc = 'desktop.exe'
mProcs = 'desktop'
mFile = '..\\RX\\Record.txt'
mOutFile = '..\\PY_RXX\\ROut.txt'
replay = "test?r=replay&file={_file}&outfile={_outfile}"
def existsProc(processname):
    pl = psutil.pids()
    for pid in pl:
        if psutil.Process(pid).name() == processname:
            print(pid)
            return True  # original only printed and broke; the __main__ loop needs a boolean
    print("not found")
    return False
def findPid(processname):
    pl = psutil.pids()
    for pid in pl:
        if psutil.Process(pid).name() == processname:
            print(pid)
            return pid
    print("not found")
    return 0
def killProc(pid):
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
def startProc(file, arg):
    main = file + " " + arg  # use the parameter, not the literal string "file"
    r_v = os.system(main)
print(r_v)
if __name__ == "__main__":
pid = findPid(mProcs)
while pid > 0:
killProc(pid)
time.sleep(100)
pid = findPid(mProcs)
arg = replay.format(_file=mFile,_outfile=mOutFile)
for i in range(0,5):
startProc(mProc,arg)
print('Test loop {_t}'.format(_t=i))
while existsProc(mProcs):
time.sleep(100)
print('all test done')
pass
| null |
Notes/Python/Process/UITest.py
|
UITest.py
|
py
| 1,958 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "psutil.pids",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "psutil.Process",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "psutil.pids",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "psutil.Process",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.kill",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 63,
"usage_type": "call"
}
] |
370492354
|
__author__ = "Pruthvi Kumar, [email protected]"
__copyright__ = "Copyright (C) 2018 Pruthvi Kumar | http://www.apricity.co.in"
__license__ = "Public Domain"
__version__ = "1.0"
import os
from jinja2 import Environment, FileSystemLoader
from nucleus.db.cache_manager import CacheManager
class MetaGen(CacheManager):
def __init__(self):
super(MetaGen, self).__init__()
self.logger = self.get_logger(log_file_name='metaGen_logs',
log_file_path='{}/trace/metaGen_logs.log'.format(self.ROOT_DIR))
self.__models_root = '{}/mic/models'.format(self.ROOT_DIR)
self.__controllers_root = '{}/mic/controllers'.format(self.ROOT_DIR)
self.__main_executable = '{}/main.py'.format(self.ROOT_DIR)
self.__jinja_env = Environment(loader=FileSystemLoader('{}/nucleus/templates/'.format(self.ROOT_DIR)))
self.__models_template = self.__jinja_env.get_template('model_template.py')
self.__controllers_template = self.__jinja_env.get_template('controller_template.py')
self.new_mic = self.__meta_generator
def __meta_generator(self, mic_name):
"""
Generate structure for everything from models, & controllers.
:param mic_name:
:return:
"""
new_model = self.__models_root + '/{}'.format(mic_name)
try:
if not os.path.exists(new_model):
os.makedirs(new_model)
open(new_model + '/__init__.py', 'w+').close()
#open(new_model + '/model_{}.py'.format(mic_name), 'w+').close()
with open(new_model + '/model_{}.py'.format(mic_name), 'w+') as mf:
mf.write(self.__models_template.render(modelName=mic_name))
# Generate Controllers for newly created model.
with open(self.__controllers_root + '/controller_{}.py'.format(mic_name), 'w+') as cf:
cf.write(self.__controllers_template.render(modelName=mic_name, controllerName=mic_name))
return('Meta Structure of models & controllers for {} successfully created!'.format(mic_name))
else:
raise Exception('[MetaGen]: File/Folder named {} already exists in path {}. MetaGen will require '
'unique names for it to generate MIC structure.'.format(mic_name, new_model))
except Exception as e:
self.logger.exception('[MetaGen]: Exception during instantiating MIC stack for {}. '
'Details: {}'.format(mic_name, str(e)))
if __name__ == '__main__':
mg = MetaGen()
mg.new_mic('test_5')
| null |
nucleus/metagen.py
|
metagen.py
|
py
| 2,668 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "nucleus.db.cache_manager.CacheManager",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "jinja2.Environment",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 35,
"usage_type": "call"
}
] |
261047640
|
import torch
from torch import nn
from torch.utils.data import Dataset
class Engine:
def __init__(self, model, optimizer, device):
self.model = model
self.optimizer = optimizer
self.device = device
@staticmethod
def loss_fn(targets, outputs):
return nn.BCEWithLogitsLoss()(outputs, targets)
def train(self, data_loader):
self.model.train()
final_loss = 0
for x, t, y in data_loader:
self.optimizer.zero_grad()
inputs = x.to(self.device)
timestamps = t.to(self.device)
targets = y.to(self.device)
outputs = self.model(inputs, timestamps).squeeze(1)
loss = self.loss_fn(targets, outputs)
# print(loss.shape)
loss.backward()
self.optimizer.step()
final_loss += loss.item()
return (final_loss/len(data_loader))
def evaluate(self, data_loader):
self.model.eval()
final_loss = 0
with torch.no_grad():
for x, t, y in data_loader:
inputs = x.to(self.device)
timestamps = t.to(self.device)
targets = y.to(self.device)
outputs = self.model(inputs, timestamps).squeeze(1)
loss = self.loss_fn(targets, outputs)
final_loss += loss.item()
return (final_loss/len(data_loader))
class ParlDataset(Dataset):
def __init__(self, features, timestamps, labels):
self.features = features
self.timestamps = timestamps
self.labels = labels
def __getitem__(self, idx):
item = (
torch.tensor(self.features[idx]),
torch.tensor(self.timestamps[idx], dtype=torch.float32),
torch.tensor(self.labels[idx], dtype=torch.float32)
)
return item
def __len__(self):
return len(self.labels)
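# Minimal wiring sketch (added): the toy model, random data and
# hyper-parameters below are placeholders, not part of the original module.
if __name__ == "__main__":
    import numpy as np

    class ToyModel(nn.Module):
        def __init__(self, n_features):
            super().__init__()
            self.fc = nn.Linear(n_features + 1, 1)

        def forward(self, x, t):
            # treat the timestamp as one extra input feature
            return self.fc(torch.cat([x, t.unsqueeze(1)], dim=1))

    feats = np.random.rand(32, 4).astype("float32")
    times = np.random.rand(32).astype("float32")
    labels = np.random.randint(0, 2, 32).astype("float32")
    loader = torch.utils.data.DataLoader(ParlDataset(feats, times, labels), batch_size=8)
    model = ToyModel(4)
    engine = Engine(model, torch.optim.Adam(model.parameters()), "cpu")
    print(engine.train(loader), engine.evaluate(loader))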
| null |
utils_timeLSTM.py
|
utils_timeLSTM.py
|
py
| 1,926 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.BCEWithLogitsLoss",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 61,
"usage_type": "attribute"
}
] |
574940043
|
from memoize import memoized
def is_complete(csp, assignment):
w, h, horiz_constr, vert_constr = csp
return len(assignment) == h
@memoized
def order_domain_values(csp, var):
w, h, horiz_constr, vert_constr = csp
# calculate the possible lengths and movements
# generate the numbers by moving sequences of ones
for bits in generate_bits(horiz_constr[var][::-1], w):
yield bits
def generate_bits(constraint, length):
return generate_bits_rec(constraint, length, bits=(), zeros=0, part=0)
def generate_bits_rec(constraint, length, bits, zeros, part):
if len(bits) == length and part == len(constraint) + 1:
yield bits
if part == 0 or part >= len(constraint):
choice_start = 0
else:
choice_start = 1
for choice in range(choice_start, length - sum(constraint) - zeros + 1):
if part < len(constraint):
new_bits = bits + (0,) * choice + (1,) * constraint[part]
else:
new_bits = bits + (0,) * choice
new_zeros = zeros + choice
new_part = part + 1
if new_part <= len(constraint) + 1 and len(new_bits) <= length:
yield from generate_bits_rec(constraint, length, new_bits, new_zeros, new_part)
# @memoized
def is_consistent(csp, assignment, value):
# Todo: should be called value_is_consistent_with_assignment
"""Assumes all the assignments are consistent with the horizontal constraints so it checks only the verticals."""
w, h, horiz_constr, vert_constr = csp
new_ass = assign_value(assignment, value, len(assignment))
if len(new_ass) == h:
for col in range(len(vert_constr)):
if not col_is_consistent(csp, new_ass, vert_constr[col], col):
return False
return True
# @memoized
def col_is_consistent(csp, assignment, constr, col):
w, h, horiz_constr, vert_constr = csp
row = tuple(bits[col] for bits in assignment)
return row_is_consistent(csp, row, constr)
# @memoized
def row_is_consistent(csp, bits, constraint):
lengths_of_1 = []
prev = False
current_length = 0
for bit in bits:
if bit:
current_length += 1
else:
if prev:
if len(lengths_of_1) < len(constraint) and current_length != constraint[len(lengths_of_1)]:
return False
lengths_of_1.append(current_length)
current_length = 0
prev = bit
if current_length > 0:
lengths_of_1.append(current_length)
return len(lengths_of_1) == len(constraint) and all([(a == b) for a, b in zip(lengths_of_1, constraint)])
def complete_assignment(csp):
w, h, horiz_constr, vert_constr = csp
assignment = ()
for var in range(h):
assignment = assign_value(assignment, next(order_domain_values(csp, var)), var)
# todo: maybe choosing values consistent with the vertical constraints will speed things up since we start "nearer"
return assignment
def assign_value(assignment, value, var):
return assignment[:var] + (value,) + assignment[var + 1:]
def null_assignment():
return ()
#
# def assign_value(assignment, value, var):
# return assignment[:var] + [value] + assignment[var + 1:]
#
#
# def null_assignment():
# return []
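# Illustration (added, worked out by hand for a small case):
#   list(generate_bits((1,), 3)) == [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
# i.e. every placement of a single run of one '1' within a row of length 3.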
| null |
level_32/csp_tuple.py
|
csp_tuple.py
|
py
| 3,288 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "memoize.memoized",
"line_number": 9,
"usage_type": "name"
}
] |
392093911
|
# Code provided under the BSD License
from xml.dom import minidom
import urllib.request
import urllib.parse
import urllib.error
import series
import teevee_conf
import re
import time
import sys
import pickle
import tweepy
DEBUG = True
PROD = True
# _________________________________________________________
# Finds a node that matches the given attributes within a DOM
# document
# _________________________________________________________
def findNode( content, attr, slist):
for x in slist:
y = []
for j in range(0,len(attr)):
y.append( x.getAttribute(attr[j]).lower() )
if y == content:
return x
return None
# _________________________________________________________
# FUNCTION: get_newzbin_release
# ARGS: release_id (teevee), release_name
#
# Search for release_name.nzb in teevee newzgroups using
# Newzbin RAW search. If a result is found it is stored
# within one NZB file.
# _________________________________________________________
def get_newzbin_release( release_id, release_name ):
# Performs raw search on newzbin to find the NZB
query = {
"username":teevee_conf.newzbin_user,
"password":teevee_conf.newzbin_pwd,
"query":str(release_id) + " " + ".nzb",
"group":"alt.binaries.teevee"
}
    http_data = urllib.parse.urlencode( query ).encode()  # POST bodies must be bytes
http_resp = urllib.request.urlopen( \
"http://www.newzbin.com/api/filefind3/",
http_data )
# Parse response and gets fileids
try:
results = int( http_resp.readline().decode().replace("TOTAL=","").rstrip() )
response = ''
print(results)
for i in range(0,results):
response += http_resp.readline().decode().rstrip().split('\t')[0]
if i+1 < results:
response += ','
except:
print('Search failed')
return False
# Retrieves the corresponding NZB file
query = {
"username":teevee_conf.newzbin_user,
"password":teevee_conf.newzbin_pwd,
"fileid":response
}
    http_data = urllib.parse.urlencode( query ).encode()  # POST bodies must be bytes
# Fetches NZB
while( True ):
http_resp = urllib.request.urlopen( \
"http://www.newzbin.com/api/dnzb/",
http_data )
# DNZB Max Rate is reached : Retry later
if http_resp.getheader('X-DNZB-RCode') == '450':
wait = int( re.findall( r'(\d+)', http_resp.getheader('X-DNZB-RText') )[0] ) + 1
time.sleep( wait )
# Other unhandable error
elif http_resp.getheader('X-DNZB-RCode') != '200':
print('Newzbin:' + http_resp.getheader('X-DNZB-RCode') )
return False
else:
break
# Nzb is fetched, file is saved
outputnzb = open( teevee_conf.newzbin_dest + release_name + ".nzb","bw+")
    outputnzb.write( http_resp.read() )
outputnzb.close()
return True
# _________________________________________________________
# FUNCTION: get_nzbmatrix_release
# ARGS: release_id (teevee), release_name, group (teevee or multimedia)
#
# Search for a given release on NZBMatrix by looking up posts with release_id
# in their subjects on the corresponding groups. The resulting NZB is then fetched
# from nzbmatrix and stored for future use
# _________________________________________________________
def get_nzbmatrix_release( release_id, release_name, group="alt.binaries.teevee" ):
query = {
"username":teevee_conf.nzbmatrix_user,
"apikey":teevee_conf.nzbmatrix_key,
"search":str( release_id ),
"searchin":"subject",
"group":group
}
http_data = urllib.parse.urlencode( query )
# Search for release
while True:
http_resp = urllib.request.urlopen( \
"http://api.nzbmatrix.com/v1.1/search.php?" + http_data )
response = http_resp.readline().decode().rstrip()
# Nothing found : we re-run the search on ab.multi and then drop the file
if response == 'error:nothing_found':
if group == "alt.binaries.teevee":
return get_nzbmatrix_release( release_id, release_name, "alt.binaries.multimedia" )
else:
print("Notfound")
return False
# If we hit the max rate, sleep
elif response.find("error:please_wait") != -1:
            time.sleep( int( re.findall( r'(\d+)', response )[0] ) + 1 )
# Other Unknown error
elif response.find("error") != -1:
print( response )
if group == "alt.binaries.teevee":
return get_nzbmatrix_release( release_id, release_name, "alt.binaries.multimedia" )
return False
# Everything is fine
else:
NZB_NAME = http_resp.readline().decode().rstrip().replace("NZBNAME:","").replace(";","")
if NZB_NAME == release_name.replace(".", " ").replace("-", " ").replace("_", " "):
NZB_ID = int( re.findall( r'(\d+)', response )[0] )
else:
if group == "alt.binaries.teevee":
return get_nzbmatrix_release( release_id, release_name, "alt.binaries.multimedia" )
return False
break
# At this point NZB_ID should contain the ID of the Nzb to fetch, let us get it
query = {
"username":teevee_conf.nzbmatrix_user,
"apikey":teevee_conf.nzbmatrix_key,
"id":str(NZB_ID)
}
http_data = urllib.parse.urlencode( query )
http_resp = urllib.request.urlopen( "http://api.nzbmatrix.com/v1.1/download.php?" + http_data )
# Houston we have a problem
if http_resp.getheader('Content-Type') != 'application/x-nzb':
print( http_resp.readline() )
return False
# Otherwise
    response = http_resp.read()
outputnzb = open(teevee_conf.nzbmatrix_dest + release_name + ".nzb","bw+")
outputnzb.write( response )
outputnzb.close( )
# Makes sure that we wait 10 seconds until next download
# Rate limit is per hour now, don't need this no more
# time.sleep( 10 )
return True
# _________________________________________________________
# FUNCTION: download_rlz
# ARGS: release_id (teevee), release_name
#
# Search release and downloads nzb file on newzbin with
# fallback on nzbmatrix
# _________________________________________________________
def download_rlz( release_id, release_name ):
izGood = False
# Let us give newzbin a try
try:
izGood = get_newzbin_release( release_id, release_name )
except:
izGood = False
# Nothing on newzbin (or down, or ... whatever), fallback using nzbmatrix
if not izGood:
try:
print('Trying nzbmatrix ...')
izGood = get_nzbmatrix_release( release_id, release_name )
except:
izGood = False
try:
if izGood and teevee_conf.tw_enable:
tw_api = load_twitter_api()
tw_api.update_status('Incoming: ' + release_name)
except:
pass
return izGood
def load_twitter_api():
auth = tweepy.OAuthHandler(teevee_conf.tw_consumer_token, \
teevee_conf.tw_consumer_secret)
auth.set_access_token(teevee_conf.tw_access_token, \
teevee_conf.tw_access_secret)
api = tweepy.API(auth)
return api
# _________________________________________________________
#
# MAIN CODE:
# Read TEEVEE RSS Feed, According to data contained
# within teevee.xml, REQID's from releases that need
# to be downloaded are retrieved. These ID's are then
# used to retrieve the correponding nzb file(s) on
# UseNet
#
# _________________________________________________________
# Let's go
print('')
print('--' + time.strftime("%a, %d %b %Y %H:%M:%S +0200") + '--')
print('Hi There, Let\'s shake some dust')
print('')
# TEEVEE feed url + some command line options
if "x264" in sys.argv:
FEED_URL = 'http://abteevee.allfilled.com/rss.php?f=x264'
elif "xvid" in sys.argv:
FEED_URL = 'http://abteevee.allfilled.com/rss.php?f=xvid'
else:
# Test feed
print('Test Mode, dummy feed')
FEED_URL=''
if "tw_off" in sys.argv:
print('Twitter-announce forced off')
teevee_conf.tw_enable = False
elif "tw_on" in sys.argv:
print('Twitter-announce forced on')
teevee_conf.tw_enable = True
else:
pass
# Load Download Queue and process it
try:
fq = open( teevee_conf.rlz_queue, 'br' )
release_queue = pickle.load( fq )
fq.close()
except:
release_queue = { }
print('Processing Queue')
for dlrlz in release_queue.copy().keys():
dld = False
print('download_rlz( ' + str( release_queue[ dlrlz ] ) + ', '+ dlrlz + ' )')
if PROD:
dld = download_rlz( release_queue[ dlrlz ], dlrlz )
if not dld:
print('Could not fetch NZB. Maybe next time ?')
else:
print('NZB fetched OK. Removing from queue.')
del release_queue[ dlrlz ]
print('Done')
# Updates Queue content
fq = open( teevee_conf.rlz_queue, 'wb+' )
pickle.dump( release_queue, fq )
fq.close()
# Load Teevee Feed and download new episodes
print('Processing Feed')
# For Release Name parsing
s = series.parser()
# For score comparison at some point
s2 = series.parser()
# Load showlist
tv_stat = minidom.parse("./teevee.xml")
series_list = tv_stat.getElementsByTagName("series")
try:
# Gets feed and retrieves last releases list
feed = urllib.request.urlopen(FEED_URL)
parsedfeed = minidom.parse(feed)
rlz = parsedfeed.getElementsByTagName("item")
# Iterates over releases in RSS
for node in rlz:
rlzname = node.getElementsByTagName("title")[0].firstChild.data
rlzid = int( node.getAttribute("id") )
# If Filename was successfully parsed
if s.scan( rlzname ):
# Lookup current serie in the local database
snode = findNode( [ s.f_title.lower(), s.f_type ] , ["name", "enc"], series_list )
# If a corresponding entry was found
if snode != None:
# Finds if the corresponding episode has already been
# downloaded
                ep_nodes = snode.getElementsByTagName("ep")  # renamed: the old name shadowed the `series` module
                epz = findNode( [ s.f_epinfo.lower() ], [ "number" ], ep_nodes )
# Not downloaded yet
if epz == None:
# Then we download the corresponding NZB
print('download_rlz( ' + str(rlzid) + ', '+ rlzname + ' )')
if PROD:
dld = download_rlz( rlzid , rlzname )
else:
dld = True
# Create one node for th episode and fills its info
ep = tv_stat.createElement("ep")
num = tv_stat.createAttribute("number")
fullname = tv_stat.createTextNode( rlzname )
# Saves release name
num.value = s.f_epinfo.lower()
ep.setAttributeNode( num )
# Updates the tree
ep.appendChild( fullname )
snode.appendChild( ep )
# If the download was not successfull, we queue the file
# to process it next time
if not dld:
print('Could not fetch NZB, queuing ...')
release_queue[ rlzname ] = rlzid
else:
print('NZB fetched OK.')
# Episode already downloaded, let us see if we need to fetch the new file
else:
rlzname2 = epz.firstChild.data
s2.scan( rlzname2 )
# New file has a better score : download
if ( s2.computeScore( teevee_conf.scoretable ) < \
s.computeScore( teevee_conf.scoretable ) ) and \
(rlzname2 != rlzname):
# Print scores for debugging
print(rlzname2 + " scored " + str(s2.computeScore( teevee_conf.scoretable) ) )
print(rlzname + " scored " + str(s.computeScore( teevee_conf.scoretable) ) )
print("Replacing Release")
# Updates XML data
epz.firstChild.data = rlzname
print('download_rlz( ' + str(rlzid) + ', '+ rlzname + ' )')
# Download NZB
if PROD:
dld = download_rlz( rlzid , rlzname )
else:
dld = True
if not dld:
print('Could not fetch NZB, queuing ...')
release_queue[ rlzname ] = rlzid
else:
print('NZB fetched OK.')
# Old file is best, do nothing
else:
print("exists")
print("ID:" + str( rlzid ) + " - " + s.f_group + " - " + rlzname)
# Updates Episode Download Info
outputxml = open("./teevee.xml","w+")
tv_stat.writexml(outputxml)
outputxml.close()
outputxml = open("./teevee2.xml","w+")
tv_stat.writexml(outputxml, "", "", "\n")
outputxml.close()
# Updates Queue content
fq = open( teevee_conf.rlz_queue, 'wb+' )
pickle.dump( release_queue, fq )
fq.close()
except urllib.error.URLError as err:
print('Can\'t open URL :' + format(err))
| null |
pyvee.py
|
pyvee.py
|
py
| 13,963 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "teevee_conf.newzbin_user",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.newzbin_pwd",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.parse.urlencode",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "urllib.request.parse",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "teevee_conf.newzbin_user",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.newzbin_pwd",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.parse.urlencode",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "urllib.request.parse",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "teevee_conf.newzbin_dest",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.nzbmatrix_user",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.nzbmatrix_key",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.parse.urlencode",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "urllib.request.parse",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "teevee_conf.nzbmatrix_user",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.nzbmatrix_key",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.parse.urlencode",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "urllib.request.parse",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "teevee_conf.nzbmatrix_dest",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.tw_enable",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "tweepy.OAuthHandler",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "teevee_conf.tw_consumer_token",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.tw_consumer_secret",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.tw_access_token",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.tw_access_secret",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "tweepy.API",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.tw_enable",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.tw_enable",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.rlz_queue",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "teevee_conf.rlz_queue",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "series.parser",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "series.parser",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.parse",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "xml.dom.minidom.parse",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "teevee_conf.scoretable",
"line_number": 363,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.scoretable",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.scoretable",
"line_number": 368,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.scoretable",
"line_number": 369,
"usage_type": "attribute"
},
{
"api_name": "teevee_conf.rlz_queue",
"line_number": 405,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "urllib.request.error",
"line_number": 409,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 409,
"usage_type": "name"
}
] |
544688601
|
import vcf
import sys
import os
import json
from collections import Counter, defaultdict
import pysam
import sqlite3
import cStringIO as StringIO
from threadpool import ProducerConsumer
from utils import removeFiles
def consumer(con, returndata):
cur = con.cursor()
for dat in returndata:
if not dat:
continue
fileidx = dat[0]
modvcf = dat[1]
jsonstr = dat[2]
cur.execute("""INSERT INTO trimmed_modvcf(fileID, vcf, json) VALUES (?,?,?)""", (fileidx, modvcf, jsonstr,) )
con.commit()
def producer(info):
fileid = str(info[0])
try:
vcfInput = vcf.Reader(StringIO.StringIO(info[1]))
except:
return None
line = None
try:
line = vcfInput.next()
except:
return None
if not line:
return None
bamfile = "%s.bam"%(fileid)
bamidxfile = "%s.bam.bai"%(fileid)
with open(bamfile, "wb") as o:
o.write(info[2])
with open(bamidxfile, "wb") as o:
o.write(info[3])
vcfInput = vcf.Reader(StringIO.StringIO(info[1]))
vcfohndl = StringIO.StringIO()
vcfOutput = vcf.Writer(vcfohndl, vcfInput)
jsonhndl = StringIO.StringIO()
data = computeData(vcfInput, vcfOutput, bamfile, 0)
json.dump(data, jsonhndl, separators=(',', ':'))
jsonhndl.flush()
jsonstr = jsonhndl.getvalue()
jsonhndl.close()
vcfohndl.flush()
modvcf = vcfohndl.getvalue()
vcfohndl.close()
removeFiles([bamfile, bamidxfile])
return info[0], modvcf, jsonstr
# A method to use the BAM file + the vcf to compute our data, if the vcf does not contain it,
# or we have no idea about the specific field we need to use in the vcf file.
def computeData(inputvcf, outputvcf, samf, shift):
samfile = pysam.Samfile(samf)
vcff = dict([ (v.start - shift, v) for v in inputvcf])
data = []
for p in samfile.pileup():
if p.pos not in vcff:
continue
if outputvcf:
vcff[p.pos].POS -= shift
if not vcff[p.pos].is_snp:
if outputvcf:
outputvcf.write_record(vcff[p.pos])
continue
ref = vcff[p.pos].REF
alts = [str(v) for v in vcff[p.pos].ALT]
counts = defaultdict(Counter)
found_alts= set()
for pread in p.pileups:
if pread.is_del:
continue
base = "N"
try:
base = pread.alignment.seq[pread.qpos]
except:
if pread.query_position:
base = pread.alignment.seq[pread.query_position]
else:
continue
# Do we discard gaps and N?
if base.upper() != 'N' or base != '-':
sample = pread.alignment.qname.split("_")[-1]
found_alts.add(base.upper())
counts[sample].update(base.upper())
newALTS = list(found_alts.difference([ref]))
data.append( dict(pos = vcff[p.pos].POS, counts = dict(counts), ref = ref, alts = newALTS) )
if outputvcf:
vcff[p.pos].ALT = [vcf.model._Substitution(j) for j in newALTS]
outputvcf.write_record(vcff[p.pos])
return data
def updateVCFfiles(args, con):
cur = con.cursor()
cur.execute(""" SELECT A.fileID, A.vcf, B.bam, B.bamidx FROM trimmed_vcf AS A JOIN trimmed_inferSAM AS B ON (A.fileID = B.fileID); """)
rows = ( (r[0], r[1], bytearray(r[2]), bytearray(r[3]), ) for r in cur )
worker = ProducerConsumer(args, args.threads, producer, consumer)
worker.run(con, rows)
| null |
scripts/utils/vcfmod.py
|
vcfmod.py
|
py
| 3,580 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "vcf.Reader",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cStringIO.StringIO",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "vcf.Reader",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cStringIO.StringIO",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cStringIO.StringIO",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "vcf.Writer",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cStringIO.StringIO",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "utils.removeFiles",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pysam.Samfile",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 81,
"usage_type": "argument"
},
{
"api_name": "vcf.model._Substitution",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "vcf.model",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "threadpool.ProducerConsumer",
"line_number": 113,
"usage_type": "call"
}
] |
372412912
|
import numpy as np
import random
import matplotlib.pyplot as plt
import pickle
infile = open(r'maze_examples\maze4x6.pckl', 'rb')
data = pickle.load(infile)
infile.close()
print(data)
nr, nc = data.shape # number of rows and columns in array
pc = []  # will contain open points along the border
pr = []
for i, m in enumerate(data[:, 0]): # looping over most left column
if m: # if m is true
pc.append(0)
pr.append(i)
for i, m in enumerate(data[:, nc - 1]): # looping over most right column
if m: # if m is true
pc.append(nc - 1)
pr.append(i)
for i, m in enumerate(data[0, :]): # looping over top row
if m: # if m is true
pc.append(i)
pr.append(0)
for i, m in enumerate(data[nr - 1, :]): # looping over bottom row
if m: # if m is true
pc.append(i)
pr.append(nr - 1)
start_point = [pr[0], pc[0]] # finds starting point
end_point = [pr[1], pc[1]] # finds end point
new_point_all = []
current_point = start_point
loop_counter = 0
while current_point != end_point:
possible_moves = []
    if current_point[0] < nr - 1:
if data[current_point[0] + 1, current_point[1]]: # Check cell down
possible_moves.append([current_point[0] + 1, current_point[1]])
    if current_point[1] < nc - 1:
if data[current_point[0], current_point[1] + 1]: # check cell right
possible_moves.append([current_point[0], current_point[1] + 1])
if current_point[0] > 0:
if data[current_point[0] - 1, current_point[1]]: # Check cell up
possible_moves.append([current_point[0] - 1, current_point[1]])
if current_point[1] > 0:
if data[current_point[0], current_point[1] - 1]: # check cell left
possible_moves.append([current_point[0], current_point[1] - 1])
    new_point = random.choice(possible_moves)  # choose random coordinates from the possible-moves list
    new_point_all.append(new_point)  # append the new point to the list of all visited points
plt.imshow(data, cmap="plasma")
plt.scatter(new_point[1], new_point[0], c="blue", alpha=0.9,
s=1000) # Makes a graph of each move as it passes through the while loop
plt.axis('off')
plt.show()
    current_point = new_point  # move to the new point; the loop ends once the end point is reached
if current_point == end_point:
print("End point reached")
break
    loop_counter += 1  # loop counter to kill the loop, since the random walk could otherwise run forever
if loop_counter > 10000:
print("Maximum number of loops reached")
break
print(new_point_all)
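# Note (added): the solver above is an unguided random walk, which is why the
# 10000-iteration cap matters; a breadth-first search over the same boolean
# grid would find a shortest path deterministically instead.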
| null |
maze_runner.py
|
maze_runner.py
|
py
| 2,655 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pickle.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
}
] |
379337612
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 14:04:02 2021
@author: tfahry
"""
import pandas as pd
ldf = pd.read_csv('Lateness.csv', index_col='Date')
locset = list(set(ldf['Geography']))
loclist = []
temploc = []
for loc in ldf['Geography']:
if ';Terminate' in loc:
temploc.append(loc)
loclist.append(temploc)
temploc = []
else:
temploc.append(loc)
def compare(a, b):
    # return 0 only after checking every sublist, not on the first miss
    for sublist in loclist:
        if a in sublist and b in sublist:
            return sublist.index(a) - sublist.index(b)
    return 0
from functools import cmp_to_key
locset.sort(key = cmp_to_key(compare))
print(locset)
#%%
import numpy as np
df = pd.read_csv(r'C:\Users\Tfarhy\OneDrive - Network Rail\2021.02.15_Path Variance demos\pivoted-lateness.csv', index_col=0)
df = df.mask(df.eq('#ERROR'))
df = df.dropna(axis=1,thresh=len(df)*.98).dropna()
columns = sorted(list(df.columns), key = cmp_to_key(compare))
df = df[columns]
for col in columns:
df[col]= df[col].str.replace(',','')
df = df.astype(float)
bdf = pd.read_csv(r'C:\Users\Tfarhy\OneDrive - Network Rail\2021.02.15_Path Variance demos\pivoted-busyness.csv', index_col=0)
for col in bdf.columns:
bdf[col] = bdf[col].str.replace(',','')
bdf = bdf.astype(float)
odf = pd.read_csv(r'C:\Users\Tfarhy\OneDrive - Network Rail\2021.02.15_Path Variance demos\pivoted-offpeak.csv', index_col=0)
odf = odf.fillna(value=0)
odf = odf.astype(float)
X = df.merge(odf,how='left', left_index=True, right_index=True)
X = X.merge(bdf,how='left', left_index=True, right_index=True)
X = X[['OFF-PEAK','PEAK']]
y=df.values
X=X.values
#%%
# example of evaluating chained multioutput regression with an SVM model
import numpy as np
np.set_printoptions(suppress=True)
from numpy import mean
from numpy import std
from numpy import absolute
from sklearn.datasets import make_regression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.multioutput import RegressorChain
from sklearn.svm import LinearSVR
# define dataset
#X, y = make_regression(n_samples=1000, n_features=1,n_informative=1, n_targets=3, random_state=1, noise=0.5)
# define base model
model = LinearSVR()
# define the chained multioutput wrapper model
wrapper = RegressorChain(model)
# define the evaluation procedure
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate the model and collect the scores
n_scores = cross_val_score(wrapper, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
# force the scores to be positive
n_scores = absolute(n_scores)
# summarize performance
print('MAE: %.3f (%.3f)' % (mean(n_scores), std(n_scores)))
wrapper.fit(X,y)
print(wrapper.predict(X[0:20]))
print(y[0:20])
| null |
path-variance-preprocessing.py
|
path-variance-preprocessing.py
|
py
| 2,812 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "functools.cmp_to_key",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "functools.cmp_to_key",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.set_printoptions",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.LinearSVR",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sklearn.multioutput.RegressorChain",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.RepeatedKFold",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.cross_val_score",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.absolute",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 100,
"usage_type": "call"
}
] |
43143127
|
# -*- coding: utf-8 -*-
import numpy as np
import cv2
# Function to generate the color palette with start and end colors defined by the user
def palette( b1, g1, r1, b2, g2, r2):
b = np.linspace(b1, b2, 256)
g = np.linspace(g1, g2, 256)
r = np.linspace(r1, r2, 256)
p1 = np.tile( b.reshape(256,1), 256 )
p2 = np.tile( g.reshape(256,1), 256 )
p3 = np.tile( r.reshape(256,1), 256 )
p1 = np.uint8(p1)
p2 = np.uint8(p2)
p3 = np.uint8(p3)
palette = np.dstack( (np.dstack( (p1,p2) ), p3) )
return palette
# Start color
b1 = 56
g1 = 23
r1 = 87
# End color
b2 = 145
g2 = 201
r2 = 150
# Color palette with the defined start and end colors
paleta = palette(b1, g1, r1, b2, g2, r2)
img = cv2.imread('a.jpg', 0)
out = np.zeros( (img.shape[0], img.shape[1], 3) )
for i in range(img.shape[0]):
for j in range(img.shape[1]):
out[i][j] = paleta [ img[i][j] ][0]
out = np.uint8(out)
# Display the images
cv2.imshow('Paleta de cores', paleta)
cv2.imshow('Imagem Original', img)
cv2.imshow('Imagem resultante', out)
cv2.waitKey(0)
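# Note (added): since every palette row is constant along axis 1, the nested
# loop above can be replaced by a single vectorized lookup with the same
# expected result:
#   out = np.uint8(paleta[img, 0])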
| null |
photoshop/gimp/colorization-image.py
|
colorization-image.py
|
py
| 1,128 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.linspace",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.dstack",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 50,
"usage_type": "call"
}
] |
567903868
|
from copy import deepcopy
from typing import List, Dict, Tuple
from vqa_benchmarking_backend.datasets.dataset import DataSample, DatasetModelAdapter, DiagnosticDataset
import torch
from skimage.util import random_noise
from collections import defaultdict
@torch.no_grad()
def inputs_for_image_robustness_imagespace(current_sample: DataSample, trials: int = 3,
gaussian_mean: float = 0.0, gaussian_variance: float = 0.025,
salt_pepper_amount: float = 0.1, salt_vs_pepper_ratio: float = 0.5,
speckle_mean: float = 0.0, speckle_variance: float = 0.05,
noise_types = ['gaussian', 'poisson', 's&p', 'speckle'],
seed: int = 12345) -> List[DataSample]:
"""
    NOTE: creates len(noise_types) * trials outputs, one output per noise type per trial.
https://scikit-image.org/docs/stable/api/skimage.util.html#random-noise
Args:
noise_types: sub-list of ['gaussian', 'localvar', 'poisson', 'salt', 'pepper', 's&p', 'speckle']
Returns:
List[DataSample] of length len(noise_types)*trials
"""
candidates = []
noise_seed = seed
for i in range(trials):
# generate image noise
for noise_mode in noise_types:
kwargs = {'seed': noise_seed, 'clip': True, 'mode': noise_mode}
if noise_mode in ['gaussian', 'speckle']:
kwargs['mean'] = gaussian_mean if noise_mode == 'gaussian' else speckle_mean
kwargs['var'] = gaussian_variance if noise_mode == 'gaussian' else speckle_variance
if noise_mode in ['s&p']:
kwargs['amount'] = salt_pepper_amount
kwargs['salt_vs_pepper'] = salt_vs_pepper_ratio
noisy_img = random_noise(current_sample.image, **kwargs)
candidate = deepcopy(current_sample)
candidate.image = noisy_img
candidate.image_features = None # reset image features, they have to be recalculated
candidates.append(candidate)
noise_seed += 1 # need to change seed between iterations s.t. we obtain different samples each trial
return candidates
@torch.no_grad()
def inputs_for_image_robustness_featurespace(current_sample: DataSample, std: float = 0.01, trials: int = 15) -> List[DataSample]:
"""
Additive gaussian noise for input features
"""
candidates = []
for i in range(trials):
# generate gaussian noise, add to question features
candidate = deepcopy(current_sample)
candidate.image_features = torch.normal(mean=candidate.image_features, std=std)
candidates.append(candidate)
return candidates
@torch.no_grad()
def inputs_for_question_robustness_wordspace(current_sample: DataSample, trials: int = 15,
noise_types=['typo', 'insert', 'permute', 'synonyms', 'delete'],
max_edits_per_sample: int =2 ) -> List[DataSample]:
"""
Ideas:
* typos (might not be in vocab... - should be doable with BERT and fastText.subwords though)
* change order of words (does it have to be grammatically safe?)
* insert unneccessary words (when is that safe?)
* replace with synonyms (where to get synonym map?)
* delete word (when is that safe? e.g. don't delete 'color' from 'What color is...?')
maybe noise is more meaningful in feature space than word space
"""
raise NotImplementedError
@torch.no_grad()
def inputs_for_question_robustness_featurespace(current_sample: DataSample, adapter: DatasetModelAdapter, std: float = 0.01, trials: int = 15) -> List[DataSample]:
"""
Additive gaussian noise for input features
"""
candidates = []
for i in range(trials):
# generate gaussian noise, add to question features
candidate = deepcopy(current_sample)
        if candidate.question_features is None:
candidate.question_features = adapter.get_question_embedding(candidate)
candidate.question_features = torch.normal(mean=candidate.question_features, std=std)
candidates.append(candidate)
return candidates
@torch.no_grad()
def eval_robustness(dataset: DiagnosticDataset, original_class_prediction: str, predictions: torch.FloatTensor) -> Tuple[Dict[int, float], float]:
"""
    Evaluate predictions generated with `inputs_for_question_robustness_featurespace`,
                                        `inputs_for_question_robustness_wordspace`,
                                        `inputs_for_image_robustness_featurespace` or
                                        `inputs_for_image_robustness_imagespace`.
Args:
predictions (trials): Model predictions (probabilities)
Returns:
* Mapping from best prediction class -> fraction of total predictions
* normalized robustness score (float), where 0 means not robust, and 1 means 100% robust
"""
trials = predictions.size(dim=0)
class_pred_counter = defaultdict(float)
robustness_score = []
for trial in range(trials):
top_pred_class = predictions[trial].squeeze().argmax(dim=-1).item() # scalar
top_answer = dataset.class_idx_to_answer(top_pred_class)
class_pred_counter[top_answer] += 1
if original_class_prediction == top_answer:
robustness_score.append(1.0)
else:
robustness_score.append(0.0)
for class_idx in class_pred_counter:
class_pred_counter[class_idx] /= trials
return class_pred_counter, sum(robustness_score)/len(robustness_score)
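# Usage sketch (illustrative; `adapter.forward` is a hypothetical stand-in for
# however the benchmark produces per-candidate probability vectors):
#   candidates = inputs_for_image_robustness_featurespace(sample, std=0.01, trials=15)
#   preds = torch.stack([adapter.forward(c).squeeze() for c in candidates])  # (trials, num_classes)
#   per_class_fraction, score = eval_robustness(dataset, original_answer, preds)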
| null |
backend/vqa_benchmarking_backend/metrics/robustness.py
|
robustness.py
|
py
| 5,721 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "vqa_benchmarking_backend.datasets.dataset.DataSample",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "skimage.util.random_noise",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "vqa_benchmarking_backend.datasets.dataset.DataSample",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "vqa_benchmarking_backend.datasets.dataset.DataSample",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.normal",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "vqa_benchmarking_backend.datasets.dataset.DataSample",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "vqa_benchmarking_backend.datasets.dataset.DataSample",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "vqa_benchmarking_backend.datasets.dataset.DataSample",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "vqa_benchmarking_backend.datasets.dataset.DatasetModelAdapter",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.normal",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "vqa_benchmarking_backend.datasets.dataset.DiagnosticDataset",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "torch.FloatTensor",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 98,
"usage_type": "name"
}
] |
323254757
|
#!/usr/bin/python
import networkx as nx
import random
import math
# INDEPENDENT CASCADE MODEL
# Let S_0 be the set of seeds, i.e., the nodes active at step 0,
# and, more generally, let S_t be the set of nodes activated at step t.
# Moreover, we denote by S_<t the set of nodes activated before step t, i.e., S_<t is the union of S_i for i = 0, ..., t-1.
# The dynamics then proceeds as follows.
# At each time step t >= 1, for each node u in S_{t-1}, and each neighbor v not yet active (i.e., not in S_<t),
# with probability prob v is activated, i.e. v is inserted in S_t.
# The dynamics stops at the first time step t* such that S_t* is empty.
def cascade(graph, active):
#active represents the set S_t in the description above
if len(active) > 0:
#newactive represents the set S_{t+1}
newactive=set()
for i in active:
#This allows to keep track of S_<t, i.e. the set of nodes activated before time t
graph.nodes[i]['active']=True
for i in active:
for j in graph[i]:
if 'active' not in graph.nodes[j]:
r=random.random()
if r < graph[i][j]['prob']:
newactive.add(j)
cascade(graph,newactive)
return graph
# LINEAR THRESHOLD MODEL
# The dynamics then proceeds as follows.
# At each time step t >= 1, for each node v not yet activated (i.e., not in S_<t)
# if the fraction of activated neighbors (i.e., neighbors in S_<t)
# is above the threshold t(v), then v is activated, i.e., v is inserted in S_t.
# The dynamics stops at the first time step t* such that S_t* is empty.
def threshold(graph, active):
#Thresholds are randomly assigned only at first step
thresholds=nx.get_node_attributes(graph,'threshold')
if len(thresholds) == 0:
for i in graph.nodes():
graph.nodes[i]['threshold'] = random.random()
if len(active) > 0:
newactive=set()
for i in active:
graph.nodes[i]['active'] = True
for i in active:
for j in graph[i]:
if 'active' not in graph.nodes[j]:
#updating the number of active neighbors
if 'num_act' not in graph.nodes[j]:
graph.nodes[j]['num_act']=1
else:
graph.nodes[j]['num_act']+=1
# node activation
if graph.nodes[j]['num_act']/len(graph[j]) >= graph.nodes[j]['threshold']:
newactive.add(j)
threshold(graph,newactive)
return graph
# MAJORITY DYNAMICS
# At each time step we select a single node whose state does not correspond to the state of the majority of its neighbors,
# and we fix it.
#
# Differently from the previous models, this dynamics allows nodes to revise their state (i.e., to go from non-active to active and vice versa).
# We consider a single update at each time step. Indeed, multiple updates may lead the dynamics to never converge (e.g., the bipartite graph with each side having a different initial state).
# Note that a different update order may lead to very different stable states (again, look at the case of the bipartite graph).
# This makes it more complicated to forecast the outcome of this dynamics.
def majority(graph, act, nact):
    # At the previous step there may be nodes that have been activated or nodes that have been deactivated
if len(act) > 0 or len(nact) > 0:
for i in act:
graph.nodes[i]['active']=True
#updating the number of active neighbors
for i in act:
for j in graph[i]:
if 'num_act' not in graph.nodes[j]:
graph.nodes[j]['num_act'] = 1
else:
graph.nodes[j]['num_act'] += 1
#updating the number of non active neighbors
for i in nact:
graph.nodes[i]['active']=False
for i in nact:
for j in graph[i]:
if 'num_act' not in graph.nodes[j]:
graph.nodes[j]['num_act'] = 0
else:
graph.nodes[j]['num_act'] -= 1
change=False
        #We choose to update a node willing to become active before a node willing to become non-active
        #This maximizes the number of active nodes at the end of the dynamics
for i in graph.nodes():
#checking if i has an incentive to be activated
if ('active' not in graph.nodes[i] or not graph.nodes[i]['active']) and 'num_act' in graph.nodes[i] and graph.nodes[i]['num_act'] >= len(graph[i])/2:
majority(graph,[i],[])
change=True
# the break serves to update a single node at each time step
break
if not change: #if no node is willing to become active, checks for nodes willing to become non active
for i in graph.nodes():
                # checking if i has an incentive to be deactivated
if ('active' in graph.nodes[i] and graph.nodes[i]['active']) and ('num_act' not in graph.nodes[i] or graph.nodes[i]['num_act'] < len(graph[i])/2):
majority(graph,[],[i])
break
return graph
# VOTER MODEL
# The above models are essentially deterministic (except, for example, for the random selection of thresholds).
# There are instead purely probabilistic models, such as the voter model.
#
# In this model, at each time step we select a random node, and we update this node's state by copying the state of a random neighbor.
# The advantage of these models is that they allow "wrong" choices, which may be realistic, and useful to escape from bad unrealistic equilibria.
#
# Note that this dynamics does not terminate, unless either all nodes are active, or none are.
# For this reason we only run a limited number of steps.
def voter(graph, seed, num_steps):
#Initialization
for i in graph.nodes():
if i in seed:
graph.nodes[i]['active'] = True
else:
graph.nodes[i]['active'] = False
#Update
for t in range(num_steps):
u=random.choice(list(graph.nodes()))
v=random.choice(list(graph[u]))
graph.nodes[u]['active'] = graph.nodes[v]['active']
return graph
# Since the independent cascade and the linear threshold models are not deterministic,
# the marginal contribution cannot be directly computed, but it should be estimated
# by averaging over multiple runs of the dynamics.
#
# As we will observe below, the number of runs necessary to achieve a good estimation is very large,
# and this increases the running time of the greedy algorithm.
# On the other side, if the greedy algorithm runs without a good estimation,
# its approximation can become much larger than what is stated below.
def marginal_influence(graph,seeds,v,dynamics):
# In order to have that the returned estimation is close to the real marginal contribution with high probability,
# we have to choose a large number of runs. Specifically, to have that the estimated value is within the interval
# [(1-eps)*true value, (1+eps)*true value] with probability at least 1-nu the number of runs must be the following one
    # num_repeat=math.ceil(2*graph.number_of_nodes()*math.log(1/nu)/(eps*eps))
# E.g., if we want that the estimated value is at most 5% away from the real value with probability at least 90%,
# we should set eps = 0.05 and nu = 0.1, and we have num_repeat = 2*n*ln(10)*400 = 1842*n.
# If, instead, we want that the estimated value is at most 1% away from the real value with probability at least 99%,
# we should set eps = 0.01 and nu = 0.01, and we have num_repeat = 2*n*ln(100)*10000 = 92104*n.
eps=0.05
nu = 0.1
num_repeat = math.ceil(2 * graph.number_of_nodes() * math.log(1 / nu) / (eps * eps))
sumt=0
for i in range(num_repeat):
sumt+=len(nx.get_node_attributes(dynamics(graph,list(seeds+[v])),'active'))
return sumt/num_repeat
#The following greedy algorithm returns a set of seeds of size budget such that
#the (expected) number of active nodes at the end of the dynamics is a good approximation (namely, 1-1/e)
#of the maximum (expected) number of nodes that would be achieved by the best seed set of size budget,
#whenever the dynamics satisfies the following two properties:
#- Monotonicity: the (expected) number of active nodes at the end of the dynamics increases as the number of seed nodes increases;
#- Submodularity: the marginal contribution of a seed node (i.e., how much it increases the number of active nodes at the end of the dynamics)
# is larger when this seed node is added to a small seed set than when it is added to a large seed set.
#
# Interestingly, both the independent cascade model and the linear threshold model enjoy these properties.
# Hence, for these dynamics the following greedy algorithm returns a good approximation of the optimal seed set.
#
# The greedy algorithm simply works by adding at each time step the node with the largest marginal contribution to the seed set
def greedy(graph,budget,dynamics):
seeds=[]
while budget > 0:
best=0
for v in graph.nodes():
if v not in seeds:
#compute the marginal contribution of each node that is not yet in the seed set
infl=marginal_influence(graph,seeds,v,dynamics)
                #we save the node with the largest marginal contribution
if infl >= best:
best=infl
bestV=v
        #we add the node with the largest marginal contribution to the seed set
seeds.append(bestV)
budget-=1
return seeds
G = nx.Graph()
G.add_edge('A', 'B',prob=2/3)
G.add_edge('A', 'C',prob=2/3)
G.add_edge('B', 'C',prob=2/3)
G.add_edge('B', 'D',prob=2/3)
G.add_edge('D', 'E',prob=2/3)
G.add_edge('D', 'F',prob=2/3)
G.add_edge('D', 'G',prob=2/3)
G.add_edge('E', 'F',prob=2/3)
G.add_edge('F', 'G',prob=2/3)
seed = {'B'}
# INDEPENDENT CASCADE
print(list(nx.get_node_attributes(cascade(G, seed), 'active').keys()))
G = nx.Graph()
G.add_edge('A', 'B')
G.add_edge('A', 'C')
G.add_edge('B', 'C')
G.add_edge('B', 'D')
G.add_edge('D', 'E')
G.add_edge('D', 'F')
G.add_edge('D', 'G')
G.add_edge('E', 'F')
G.add_edge('F', 'G')
seed = {'B'}
# LINEAR THRESHOLD
print(list(nx.get_node_attributes(threshold(G, seed), 'active').keys()))
G = nx.Graph()
G.add_edge('A', 'B')
G.add_edge('A', 'C')
G.add_edge('B', 'C')
G.add_edge('B', 'D')
G.add_edge('D', 'E')
G.add_edge('D', 'F')
G.add_edge('D', 'G')
G.add_edge('E', 'F')
G.add_edge('F', 'G')
seed = {'B','F'}
# MAJORITY
active = nx.get_node_attributes(majority(G, seed, {}), 'active')
print([i for i in active.keys() if active[i]])
G = nx.Graph()
G.add_edge('A', 'B')
G.add_edge('A', 'C')
G.add_edge('B', 'C')
G.add_edge('B', 'D')
G.add_edge('D', 'E')
G.add_edge('D', 'F')
G.add_edge('D', 'G')
G.add_edge('E', 'F')
G.add_edge('F', 'G')
seed = {'B'}
# VOTER
active = nx.get_node_attributes(voter(G, seed, 10), 'active')
print([i for i in active.keys() if active[i]])
G=nx.Graph()
G.add_edge('A','B',prob=0.66)
G.add_edge('A','C',prob=0.6)
G.add_edge('B','C',prob=0.75)
G.add_edge('B','D',prob=0.55)
G.add_edge('D','E',prob=0.7)
G.add_edge('D','F',prob=0.5)
G.add_edge('D','G',prob=0.45)
G.add_edge('E','F',prob=0.8)
G.add_edge('F','G',prob=0.66)
print(greedy(G,2,cascade))
| null |
utils/lesson3.py
|
lesson3.py
|
py
| 11,467 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "random.random",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "networkx.get_node_attributes",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "networkx.get_node_attributes",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "networkx.get_node_attributes",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "networkx.get_node_attributes",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "networkx.get_node_attributes",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "networkx.get_node_attributes",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 250,
"usage_type": "call"
}
] |
81805111
|
#!/home/kjeong23/softwares/bin/python3.4
# program to calculate hoppingless MSD of surfactants
# v04 renovation: imported mdtraj to process xtc file. Also, xtc file is truncated with 1 atomtype.
# definition of non-hopping: until it is detected in 'other' micelle.
# in migevent scheme, until the arrival time for the destination micelle.
# algorithm: read migevent to record cutting points -> track 1 surfactant. cut traj.
# -> record r0 (now don't calc COM) -> calc square displacement until it hops.(consider PBC)
# in every step of COM SD calc, pile up in the bins, adding up "counts"(w.r.t. t)
# to compare statistical weight of all dr^2(t). -> calc MSD(t).
# -> display MSD(t), count(t).
# ** binary .xtc trajectory file: requires pbc -whole treatment!!
#inputs: grofile, traj xtc file, migfile (listed inter-micellar exchanges, molecule index sorted)
#output: file of {t, nonhop_MSD(t), count(t)}
#v03: started consideration of hopping: when a surfactant hops, set new r0, t=0
#philosophy of ensemble averaging: because the non-hop trajectory segments have unequal lengths,
#careless averaging would weight them unequally. We still average that way, but
#keep the statistical count alongside, so different surfactant groups can be averaged properly later.
import math
import sys
import numpy
import timeit
import mdtraj as md
tstep=0.200 #use skip10-trajectory
#now do not use atomic mass matrix anymore. Don't care about COM, and only track 1 atom.
#def mig_stepteller(migline,tb1,tb2,mi1,mi2): #read migline, record surf index and time
# #read surf index and initial time(important)
# split=migline.split()
# mindex,tarr=int(split[0]),float(split[4]) #mindex : 'string' form
# hop_info=[]
# if mi1<=mindex<=mi2 and tb1<=tarr<=tb2: #collect info only in this case
# ntarr=(tarr-tb1)/tstep #collect info only in this case. frame index in entire trajectory
# ntarr=int(round(ntarr))
# hop_info=[mindex,ntarr]
# return hop_info
def pbcdr(ri,rf,box): #displacement vector with considering pbc
dr=rf-ri
for i in range(3):
if dr[i]>(box[i]/2.0):
dr[i]-=box[i]
elif dr[i]<(-box[i]/2.0):
dr[i]+=box[i]
return dr
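# Worked example for pbcdr (added comment): with box=[10,10,10], ri=[9.5,0,0] and
# rf=[0.5,0,0], the raw difference is -9.0 in x; since -9.0 < -5.0 we add the box
# length, giving the minimum-image displacement dr=[1.0,0,0] (the particle moved +1, not -9).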
def SD_calculator(trimtraj,nstep): #square displacement calculator. assumes that trajectory has 1 particle.
sdbin1,count1=numpy.zeros(nstep),numpy.zeros(nstep)
trimstep=trimtraj.n_frames
crd=trimtraj.xyz
box=numpy.array(trimtraj.unitcell_lengths[0])
#for ntau in range(trimstep): #use tau-step loop first
# for i in range(trimstep-ntau): #initial snapshot
for i in range(trimstep-1):
vec=numpy.zeros(3)
for j in range(i+1,trimstep):
dr=pbcdr(crd[j][0],crd[j-1][0],box)
vec+=dr
rmag2=numpy.dot(vec,vec)
sdbin1[j-i]+=rmag2
count1[j-i]+=1
sdbin1[0],count1[0]=0.0,1
return sdbin1,count1
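# Illustration of the binning above (added comment): for a 4-frame segment, i=0
# accumulates the unwrapped |r1-r0|^2 into bin 1, |r2-r0|^2 into bin 2 and
# |r3-r0|^2 into bin 3 (displacements are summed step by step through pbcdr, so
# jumps across the periodic boundary are unwrapped), while count1[lag] records
# how many (i,j) pairs contributed to each lag.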
#main fxn
def main():
#Part to load coordinate file, migration event file
grofile = sys.argv[1]
trjfile = sys.argv[2]
migfile = sys.argv[3]
outfile = open(sys.argv[4],'w')
tblock=input("What is the time interval of trajectory you're loading(in ns)? ex) 200 400 \n")
tb12=tblock.split()
tb1,tb2=float(tb12[0]),float(tb12[1])
ntb1,ntb2=tb1/tstep,tb2/tstep
ntb1,ntb2=int(round(ntb1)),int(round(ntb2))
mi12=input("What is the surfactant index interval of MSD calculation? ex) 0 9 \n")
mi12=mi12.split()
mi1,mi2=int(mi12[0]),int(mi12[1])
start_time=timeit.default_timer()
#input 1 : load surf traj. (big file)
traj=md.load(trjfile,top=grofile)
traj=traj[ntb1:ntb2+1]
topology=traj.topology
if topology.n_residues <= mi2: #surfactant index exceeded
mi2=topology.n_residues-1 #autofix surfactant index range
elapsed=timeit.default_timer() - start_time
print('finished trajectory loading {}'.format(elapsed))
print(traj)
nstep=traj.n_frames
#prepare bins for MSD
sdbin,count=numpy.zeros(nstep),numpy.zeros(nstep) #bin & stat weight of dr^2(t) ave
#input 2 : load migevents. Then, for target surfactant molecules, should make 'cutting time list'.
# the cutting time list will be used for next loop, to slice trajectories and calc MSD internally for each traj segment.
#tagmi=mi1 #index of surfactant being searched to write cuttlist
#row,cuttlist=[],[]
#while True:
# migline=migfile.readline()
# if migline=='':#EOF
# break
# info=mig_stepteller(migline,tb1,tb2,mi1,mi2)
# if len(info)!=0 and info[0]==tagmi:
# row.append(info[1])
# elif len(info)!=0 and info[0]>tagmi:
# row.append(nstep)
# tagmi=info[0]
# cuttlist.append(row)
# row=[info[1]]
#cuttlist.append(row) #for the last molecule\
total_miginfo=numpy.loadtxt(migfile)
row,cuttlist=[],[]
for mi in range(mi1,mi2+1):
mi_mig=total_miginfo[total_miginfo[:,0]==mi]
mi_mig=mi_mig[tb1<=mi_mig[:,4]]
mi_mig=mi_mig[mi_mig[:,4]<=tb2]
if mi_mig.ndim==1: #1-line
ntarr=(mi_mig[4]-tb1)/tstep
ntarr=int(round(ntarr))
if len(row)==0: row.append(ntarr)
elif row[-1]!=ntarr: row.append(ntarr) #avoid duplicate
else:
for entry in mi_mig:
ntarr=(entry[4]-tb1)/tstep
ntarr=int(round(ntarr))
#duplicate check
if len(row)==0: row.append(ntarr)
elif row[-1]!=ntarr: row.append(ntarr) #avoid duplicate
if len(row)==0: row.append(nstep)
elif row[-1]!=nstep: row.append(nstep)
if row[0]==0: del row[0]
cuttlist.append(row)
row=[]
elapsed=timeit.default_timer() - start_time
print('migevent information loading complete {}'.format(elapsed))
print(cuttlist)
#loop of trajectory slicing and MSD calculation
mindex=mi1
for row in cuttlist: #each row represent 1 surfactant
lastframe=0
#remove duplicates in 1 row of cuttlist
for x in row:
trimtraj=traj[lastframe:x]
trimtraj=trimtraj.atom_slice(topology.select('resid '+str(mindex)))
sdbin1,count1=SD_calculator(trimtraj,nstep) #square displacement and statist.count for 1 traj-segment
sdbin+=sdbin1
count+=count1
lastframe=x
elapsed=timeit.default_timer() - start_time
print('surf# {} partial trajcut {} MSD calculated. time {:11.4f}'.format(mindex,x,elapsed))
mindex+=1
#printing section
sdbin=numpy.divide(sdbin,count,out=numpy.zeros_like(sdbin), where=count!=0)
#outfile.write('{:11.4f} {:11.4f} {:11.4f}\n'.format(0,0,0))
for i in range(nstep):
outfile.write('{:11.4f} {:11.4f} {:11.4f}\n'.format(tstep*i,sdbin[i],count[i]))
outfile.close()
if __name__ == "__main__": main()
| null |
py_development/data_process/micelles/nohopmsd_v04.py
|
nohopmsd_v04.py
|
py
| 6,484 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.zeros",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "timeit.default_timer",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "mdtraj.load",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 163,
"usage_type": "call"
}
] |
215633734
|
#!usr/bin/env python
# encoding: utf-8
"""
@author: tommars00
@software: PyCharm Community Edition
@file: run.py
@time: 8:37 PM
"""
from scrapy import cmdline
name = 'book'
cmd = 'scrapy crawl {0}'.format(name)
cmdline.execute(cmd.split())
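# equivalent call with an explicit argv list (illustrative): cmdline.execute(['scrapy', 'crawl', name])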
| null |
dangdang/run.py
|
run.py
|
py
| 246 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scrapy.cmdline.execute",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scrapy.cmdline",
"line_number": 17,
"usage_type": "name"
}
] |
573482198
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import os, glob, json
import config as cfg
from module.cluster import VAE_Cluster
from module.vmt import VMT, TemplateD, QHead
from module.vae import GaussianKLLoss
from dataset import KKDayUser, seq_collate
from constant import Constants
import fasttext
from utils import get_fixed_temperature, get_losses
import sklearn
import numpy as np
from tensorboardX import SummaryWriter
from utils import gradient_penalty, str2bool, chunks
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
from shutil import copyfile
import pickle
def data_iter(dataloader):
def function():
while True:
for batch in dataloader:
yield batch
return function()
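# Usage note (illustrative): data_iter turns a DataLoader into an endless batch
# stream, e.g. `it = data_iter(loader); batch = next(it)`, so the training loop
# can draw one batch per iteration without tracking epoch boundaries.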
class TemplateTrainer():
def __init__(self, args):
self.dataset2 = KKDayUser(-1, 'data/kkday_dataset/user_data',
'data/kkday_dataset/matrix_factorized_64.pkl',
prefix='item_graph', embedding=None, max_length=args.max_seq_len, force_fix_len=args.grad_penalty or args.full_text,
token_level=args.tokenize, is_train=True)
self.dataset1 = KKDayUser(-1, 'data/kkday_dataset/user_data',
'data/kkday_dataset/matrix_factorized_64.pkl',
prefix='item_graph', embedding=None, max_length=args.max_seq_len, force_fix_len=args.grad_penalty or args.full_text,
token_level=args.tokenize, is_train=True)
self.val_dataset = KKDayUser(-1, 'data/kkday_dataset/user_data',
'data/kkday_dataset/matrix_factorized_64.pkl',
prefix='item_graph', is_train=False,embedding=None, max_length=args.max_seq_len, force_fix_len=args.grad_penalty or args.full_text,
token_level=args.tokenize)
self.model = VMT(args.gen_embed_dim, self.dataset2.vocab_size,
enc_hidden_size=128, dec_hidden_size=128, tmp_category=args.tmp_cat_dim,
tmp_latent_dim=args.tmp_latent_dim, desc_latent_dim=args.desc_latent_dim, user_latent_dim=args.user_latent_dim,
biset=args.biset,
max_seq_len=args.max_seq_len-1, gpu=True)
output_latent = args.desc_latent_dim+(args.tmp_latent_dim*args.tmp_cat_dim)
self.D = TemplateD(64, args.max_seq_len, 32, self.dataset2.vocab_size, output_latent)
self.Q = QHead(self.D.feature_dim, args.tmp_cat_dim*args.tmp_latent_dim , args.desc_latent_dim)
self.D.cuda()
self.model.cuda(), self.Q.cuda()
args.vocab_size = self.dataset1.vocab_size
self.args = args
max_temp = 1.0
temp_min = 0.00005
temp = 1.0
self.gumbel_temp = temp
N = args.iterations
self.gumbel_anneal_rate = 1 / N
self.temp_anneal = max_temp / N
self.temp = args.temperature_min
# self.cluster_opt = optim.Adam(self.C.parameters(), lr=args.dis_lr)
self.gen_opt = optim.Adam(list(self.model.parameters())+list(self.Q.parameters()), lr=args.gen_lr)
self.gen_adv_opt = optim.Adam(list(self.model.parameters())+list(self.Q.parameters()), lr=args.gen_adv_lr, betas=(0.5, 0.999))
self.dis_adv_opt = optim.Adam(self.D.parameters(), lr=args.dis_lr, betas=(0.5, 0.999))
opt_level = args.opt_level
# [self.model, self.C], self.gen_adv_opt = amp.initialize([self.model, self.C], self.gen_adv_opt, opt_level=opt_level)
self.dataloader1 = torch.utils.data.DataLoader(self.dataset1, num_workers=3,
collate_fn=seq_collate, batch_size=args.batch_size, shuffle=True, drop_last=True)
self.dataloader2 = torch.utils.data.DataLoader(self.dataset2, num_workers=3,
collate_fn=seq_collate, batch_size=args.batch_size, shuffle=True, drop_last=True)
self.mle_criterion = nn.NLLLoss(ignore_index=Constants.PAD)
self.KL_loss = GaussianKLLoss()
self.mse_criterion = nn.MSELoss()
self.xent_criterion = nn.CrossEntropyLoss()
self.data_iterator1 = data_iter(self.dataloader1)
self.data_iterator2 = data_iter(self.dataloader2)
self.init_sample_inputs()
def pretrain(self, epochs, writer=None):
from dataset import KKDayUser, seq_collate
dataset = KKDayUser(-1, 'data/kkday_dataset/user_data',
'data/kkday_dataset/matrix_factorized_64.pkl',
prefix='item_graph', embedding=None, max_length=128,
token_level='word', is_train=True)
val_dataset = KKDayUser(-1, 'data/kkday_dataset/user_data',
'data/kkday_dataset/matrix_factorized_64.pkl',
prefix='item_graph', embedding=None, max_length=128,
token_level='word', is_train=False)
vae = self.model.template_vae
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.vae = vae
model = Model().cuda()
pretrain_dataloader = torch.utils.data.DataLoader(dataset, num_workers=6,
collate_fn=seq_collate, batch_size=32, shuffle=True, drop_last=True)
val_dataloader = torch.utils.data.DataLoader(val_dataset, num_workers=6,
collate_fn=seq_collate, batch_size=56, shuffle=True, drop_last=True)
optimizer = optim.Adam(model.parameters(), lr=0.0001, betas=(0.5, 0.999))
nll_criterion = nn.NLLLoss(ignore_index=Constants.PAD)
iter_ = 0
max_temp = 1.0
temp_min = 0.00005
temp = 1.0
N = len(pretrain_dataloader) * 10
anneal_rate = max_temp / N
for e in range(epochs):
for batch in pretrain_dataloader:
inputs, target = batch['tmp'][:, :-1], batch['tmp'][:, 1:]
title = batch['tgt'][:, :-1]
title = title.cuda()
inputs = inputs.cuda()
target = target.cuda()
decoder_output, latent, inp = model.vae(inputs, max_length=inputs.shape[1], temperature=temp)
nll_loss = nll_criterion(decoder_output.view(-1, dataset.vocab_size),
target.flatten())
if iter_ % 100 == 1:
temp = np.maximum(temp * np.exp(-anneal_rate * iter_), temp_min)
log_ratio = torch.log(latent * args.tmp_cat_dim + 1e-20)
kl_loss = torch.sum(latent * log_ratio, dim=-1).mean()
optimizer.zero_grad()
loss = kl_loss + nll_loss
loss.backward()
optimizer.step()
decoder_output, latent, inp = model.vae(title, max_length=title.shape[1], temperature=temp)
nll_loss = nll_criterion(decoder_output.view(-1, dataset.vocab_size),
target.flatten())
if iter_ % 100 == 1:
temp = np.maximum(temp * np.exp(-anneal_rate * iter_), temp_min)
log_ratio = torch.log(latent * args.tmp_cat_dim + 1e-20)
kl_loss = torch.sum(latent * log_ratio, dim=-1).mean()
optimizer.zero_grad()
loss = kl_loss + nll_loss
loss.backward()
optimizer.step()
if iter_ % 100 == 0:
print('loss: {:.4f}'.format(loss.item()))
if iter_ % 1000 == 0 and iter_ != 0:
model.eval()
with torch.no_grad():
print('sample latent')
clusters = {}
for batch in val_dataloader:
inputs, target = batch['tmp'][:, :-1], batch['tmp'][:, 1:]
inputs = inputs.cuda()
target = target.cuda()
decoder_output, latents, inp = model.vae(inputs, max_length=inputs.shape[1], temperature=temp)
for idx, sent in enumerate(batch['tmp'][:, 1:]):
sentence = []
for token in sent:
if token.item() == Constants.EOS:
break
sentence.append( dataset.idx2word[token.item()])
sent = ' '.join(sentence)
latent = latents[idx].cpu().detach().numpy()
clusters[sent] = latent
model.train()
iter_ += 1
self.model.template_vae.load_state_dict(model.vae.state_dict())
for params in self.model.template_vae.parameters():
params.requires_grad = False
torch.save(self.model, 'save/pretrain_vmt.pt')
def dis_step(self, i):
batch1 = next(self.data_iterator1)
src_inputs1 = batch1['src']
tmp1 = batch1['tmp']
items1, users1 = batch1['items'], batch1['users']
item_ids1 = batch1['item_ids']
inputs1, target1 = batch1['tgt'][:, :-1], batch1['tgt'][:, 1:]
if cfg.CUDA:
inputs1, items1, users1 = inputs1.cuda(), items1.cuda(), users1.cuda()
src_inputs1 = src_inputs1.cuda()
item_ids1 = item_ids1.cuda()
inputs1 = inputs1.cuda()
target1 = target1.cuda()
tmp1 = tmp1.cuda()
batch2 = next(self.data_iterator2)
tmp2 = batch2['tmp']
if cfg.CUDA:
tmp2 = tmp2.cuda()
with torch.no_grad():
desc1_outputs, desc1_latent, desc1_mean, desc1_std = self.model.encode_desc(src_inputs1)
temp1_outputs, tmp1_latent = self.model.encode_tmp(tmp1)
real_latent = torch.cat([ tmp1_latent, desc1_latent ], axis=1)
_, _, one_hots = self.model.decode(tmp1_latent, desc1_latent, users1,
desc1_outputs, temp1_outputs,
max_length=target1.shape[1], gumbel=True)
temp1_outputs, tmp2_latent = self.model.encode_tmp(tmp2)
fake_latent = torch.cat([ tmp2_latent, desc1_latent ], axis=1)
_, _, fake_hots = self.model.decode(tmp2_latent, desc1_latent, users1,
desc1_outputs, temp1_outputs,
max_length=target1.shape[1], gumbel=True)
real_samples = F.one_hot(target1, self.args.vocab_size).float()
real_logits, d_latent = self.D(real_samples)
latent_dim = tmp2_latent.shape[1]
desc_m, desc_v, tmp_latent = self.Q(d_latent)
desc_kl_loss = self.KL_loss(desc_m, desc_v)
template_loss_real = self.xent_criterion( tmp_latent.view(-1, self.args.tmp_cat_dim),
torch.argmax( tmp1_latent.view(-1, self.args.tmp_cat_dim), -1 ).long())
fake_logits, d_latent = self.D(fake_hots)
desc_m, desc_v, tmp_latent = self.Q(d_latent)
desc_kl_loss += self.KL_loss(desc_m, desc_v)
template_loss_real += self.xent_criterion( tmp_latent.view(-1, self.args.tmp_cat_dim),
torch.argmax( tmp1_latent.view(-1, self.args.tmp_cat_dim), -1 ).long())
_, d_loss = get_losses(real_logits, fake_logits, loss_type='rsgan')
self.dis_adv_opt.zero_grad()
cond_loss = template_loss_real + desc_kl_loss
loss = d_loss + cond_loss
loss.backward()
self.dis_adv_opt.step()
return loss.item(), d_loss.item(), cond_loss.item()
def step(self, i):
self.D.eval()
batch1 = next(self.data_iterator1)
src_inputs1 = batch1['src']
tmp1 = batch1['tmp']
items1, users1 = batch1['items'], batch1['users']
item_ids1 = batch1['item_ids']
inputs1, target1 = batch1['tgt'][:, :-1], batch1['tgt'][:, 1:]
if cfg.CUDA:
inputs1, items1, users1 = inputs1.cuda(), items1.cuda(), users1.cuda()
src_inputs1 = src_inputs1.cuda()
item_ids1 = item_ids1.cuda()
inputs1 = inputs1.cuda()
target1 = target1.cuda()
tmp1 = tmp1.cuda()
batch2 = next(self.data_iterator2)
src_inputs2 = batch2['src']
tmp2 = batch2['tmp']
items2, users2 = batch2['items'], batch2['users']
item_ids2 = batch2['item_ids']
inputs2, target2 = batch2['tgt'][:, :-1], batch2['tgt'][:, 1:]
if cfg.CUDA:
inputs2, items2, users2 = inputs2.cuda(), items2.cuda(), users2.cuda()
src_inputs2 = src_inputs2.cuda()
item_ids2 = item_ids2.cuda()
inputs2 = inputs2.cuda()
target2 = target2.cuda()
tmp2 = tmp2.cuda()
# nll_loss1, kl_loss1 = self.model.cycle_template(tmp1[:, :-1], tmp1[:, 1:], temperature=self.gumbel_temp)
# nll_loss2, kl_loss2 = self.model.cycle_template(tmp2[:, :-1], tmp2[:, 1:], temperature=self.gumbel_temp)
# cycle_nll = (nll_loss1+nll_loss2)/2
# cycle_kl = (kl_loss1+kl_loss2)/2
# nll_loss1, kl_loss1 = self.model.cycle_template(inputs1, tmp1[:, 1:], temperature=self.gumbel_temp)
# nll_loss2, kl_loss2 = self.model.cycle_template(inputs2, tmp2[:, 1:], temperature=self.gumbel_temp)
# title_cycle_nll = (nll_loss1+nll_loss2)/2
# title_cycle_kl = (kl_loss1+kl_loss2)/2
desc1_outputs, desc1_latent, desc1_mean, desc1_std = self.model.encode_desc(src_inputs1)
tmp1_outputs, tmp1_latent = self.model.encode_tmp(tmp1)
output1_target, output1_logits = self.model.decode(tmp1_latent, desc1_latent, users1,
desc1_outputs, tmp1_outputs,
max_length=target1.shape[1])
desc2_outputs, desc2_latent, desc2_mean, desc2_std = self.model.encode_desc(src_inputs2)
tmp1_outputs, tmp2_latent = self.model.encode_tmp(tmp2)
output2_target, output2_logits, output2_one_hot = self.model.decode(tmp2_latent, desc2_latent, users2,
desc2_outputs, tmp1_outputs,
max_length=target2.shape[1], gumbel=True)
desc_kl_loss = self.KL_loss(desc2_mean, desc2_std) + self.KL_loss(desc1_mean, desc1_std)
construct1_loss = self.mle_criterion(output1_target.view(-1, self.args.vocab_size), target1.flatten())
construct2_loss = self.mle_criterion(output2_target.view(-1, self.args.vocab_size), target2.flatten())
reconstruct_loss = (construct1_loss+construct2_loss)/2
total_kl_loss = desc_kl_loss# + cycle_kl
# cycle_nll_loss = cycle_nll
# title_loss = (title_cycle_kl* self.temp + title_cycle_nll)
real_samples = F.one_hot(target1, self.args.vocab_size).float()
d_out_real, real_latent = self.D(real_samples)
desc_m, desc_v, tmp_latent = self.Q(real_latent)
desc_kl_loss = self.KL_loss(desc_m, desc_v)
template_loss = self.xent_criterion( tmp_latent.view(-1, self.args.tmp_cat_dim),
torch.argmax( tmp1_latent.view(-1, self.args.tmp_cat_dim), -1 ).long())
_, _, output21_one_hot = self.model.decode(tmp1_latent, desc2_latent, users2,
desc2_outputs, tmp1_outputs,
max_length=target2.shape[1], gumbel=True)
d_out_fake, fake_latent = self.D(output21_one_hot)
desc_m, desc_v, tmp_latent = self.Q(fake_latent)
desc_kl_loss += self.KL_loss(desc_m, desc_v)
template_loss += self.xent_criterion( tmp_latent.view(-1, self.args.tmp_cat_dim),
torch.argmax( tmp1_latent.view(-1, self.args.tmp_cat_dim), -1 ).long())
g_loss, _ = get_losses(d_out_real, d_out_fake, self.args.loss_type)
g_loss_ = g_loss + desc_kl_loss + template_loss
total_loss = reconstruct_loss*self.args.re_weight + \
total_kl_loss* self.temp + \
g_loss_ * self.args.dis_weight
# title_loss + \
# cycle_nll_loss*self.args.cycle_weight + \
self.gen_adv_opt.zero_grad()
# with amp.scale_loss(total_loss, self.gen_adv_opt) as scaled_loss:
# scaled_loss.backward()
total_loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), cfg.clip_norm)
# torch.nn.utils.clip_grad_norm_(self.C.parameters(), cfg.clip_norm)
# torch.nn.utils.clip_grad_norm_(amp.master_params(self.gen_adv_opt), cfg.clip_norm)
self.gen_adv_opt.step()
self.D.train()
return total_loss.item(), total_kl_loss.item(), reconstruct_loss.item(), 0, desc_kl_loss.item(), template_loss.item(), g_loss_.item()
def sample_results(self, writer, step=0):
sample_size = 5
embed = self.users[:sample_size,]
desc1_outputs, desc1_latent, _, _ = self.model.encode_desc(self.descripion[:sample_size])
tmp1_outputs, tmp1_latent = self.model.encode_tmp(self.template[:sample_size])
tmp2_outputs, tmp2_latent = self.model.encode_tmp(self.template2[:sample_size])
_, output_title = self.model.decode(tmp1_latent, desc1_latent, embed,
desc1_outputs, tmp1_outputs,
max_length=self.args.max_seq_len)
_, output_title2 = self.model.decode(tmp2_latent, desc1_latent, embed,
desc1_outputs, tmp2_outputs,
max_length=self.args.max_seq_len)
samples, new_sample = '', ''
with torch.no_grad():
for idx, sent in enumerate(output_title):
sentence = []
for token in self.template[idx][1:]:
if token.item() == Constants.EOS:
break
sentence.append( self.dataset1.idx2word[token.item()])
samples += str(idx) + '. [tmp]: ' +' '.join(sentence) + '\n\n'
sentence = []
for token in sent:
if token.item() == Constants.EOS:
break
sentence.append( self.dataset1.idx2word[token.item()])
samples += ' [out]: ' +' '.join(sentence[:30]) + '\n\n'
sentence = []
for token in self.template2[idx][1:]:
if token.item() == Constants.EOS:
break
sentence.append( self.dataset1.idx2word[token.item()])
new_sample += str(idx) + '. [tmp]: ' +' '.join(sentence) + '\n\n'
sentence = []
for token in output_title2[idx]:
if token.item() == Constants.EOS:
break
sentence.append( self.dataset1.idx2word[token.item()])
new_sample += ' [mod]: ' +' '.join(sentence[:30]) + '\n\n'
        if writer is not None:
writer.add_text("Text", samples, step)
writer.flush()
writer.add_text("Transfer", new_sample, step)
writer.flush()
def calculate_bleu(self, writer, step=0, size=1000, ngram=4):
eval_dataloader = torch.utils.data.DataLoader(self.val_dataset, num_workers=8,
collate_fn=seq_collate, batch_size=20, shuffle=False)
sentences, references = [], []
scores_weights = { str(gram): [1/gram] * gram for gram in range(1, ngram+1) }
scores = { str(gram): 0 for gram in range(1, ngram+1) }
# print('Evaluate bleu scores', scores)
with torch.no_grad():
for batch in eval_dataloader:
src_inputs = batch['src']
tmp = batch['tmp']
items, users = batch['items'], batch['users']
inputs, target1 = batch['tgt'][:, :-1], batch['tgt'][:, 1:]
if cfg.CUDA:
inputs, items, users = inputs.cuda(), items.cuda(), users.cuda()
src_inputs = src_inputs.cuda()
inputs = inputs.cuda()
target1 = target1.cuda()
tmp = tmp.cuda()
batch_size = src_inputs.shape[0]
embed = users
desc_outputs, desc_latent, _, _ = self.model.encode_desc(src_inputs)
tmp_outputs, tmp_latent = self.model.encode_tmp(tmp)
_, output_title = self.model.decode(tmp_latent, desc_latent, embed,
desc_outputs, tmp_outputs,
max_length=self.args.max_seq_len)
# output_title = torch.argmax(output_logits, dim=-1)
for idx, sent_token in enumerate(batch['tgt'][:, 1:]):
reference = []
for token in sent_token:
if token.item() == Constants.EOS:
break
reference.append(self.val_dataset.idx2word[token.item()] )
references.append(reference)
sent = output_title[idx]
sentence = []
for token in sent:
if token.item() == Constants.EOS:
break
sentence.append( self.val_dataset.idx2word[token.item()])
sentences.append(sentence)
for key, weights in scores_weights.items():
scores[key] += sentence_bleu([reference], sentence, weights,
smoothing_function=SmoothingFunction().method1)
if len(sentences) > size:
break
with open(os.path.join(self.save_path, '{}_reference.txt'.format(0)), 'w') as f:
for sent in references:
f.write(' '.join(sent)+'\n')
with open(os.path.join(self.save_path, '{}_generate.txt'.format(step)), 'w') as f:
for sent in sentences:
f.write(' '.join(sent)+'\n')
        if writer is not None:
for key, weights in scores.items():
scores[key] /= len(sentences)
writer.add_scalar("Bleu/score-"+key, scores[key], step)
writer.flush()
def init_sample_inputs(self):
batch1 = next(self.data_iterator1)
src_inputs1 = batch1['src']
tmp1 = batch1['tmp']
items1, users1 = batch1['items'], batch1['users']
inputs1, target1 = batch1['tgt'][:, :-1], batch1['tgt'][:, 1:]
if cfg.CUDA:
inputs1, items1, users1 = inputs1.cuda(), items1.cuda(), users1.cuda()
src_inputs1 = src_inputs1.cuda()
inputs1 = inputs1.cuda()
target1 = target1.cuda()
tmp1 = tmp1.cuda()
self.title_input = inputs1
self.items, self.users = items1, users1
self.descripion = src_inputs1
self.template = tmp1
batch2 = next(self.data_iterator1)
tmp2 = batch2['tmp']
if cfg.CUDA:
tmp2 = tmp2.cuda()
self.template2 = tmp2
def test(self):
i = 0
self.temp = np.maximum(self.temp * np.exp(-self.temp_anneal * i), 0.00005)
self.gumbel_temp = np.minimum(self.args.gumbel_max - (self.gumbel_temp * np.exp(-self.gumbel_anneal_rate * i)), 0.00005)
for i in range(100):
self.temp = np.maximum(self.temp * np.exp(-self.temp_anneal * i), 0.00005)
self.gumbel_temp = np.maximum(self.args.gumbel_max ** (self.gumbel_anneal_rate * i), 0.00005)
print(self.temp, self.gumbel_temp)
def train(self):
        if self.args.pretrain_embeddings is not None:
model = fasttext.load_model(self.args.pretrain_embeddings)
embedding_weight = self.model.embedding.cpu().weight.data
hit = 0
for word, idx in self.dataset1.word2idx.items():
embedding_weight[idx] = torch.from_numpy(model[word]).float()
hit += 1
embedding_weight = embedding_weight.cuda()
self.model.embedding.weight.data.copy_(embedding_weight)
self.model.embedding.cuda()
self.model.title_decoder.decoder.outputs2vocab.weight.data.copy_(self.model.embedding.weight.data)
self.model.template_vae.load_state_dict( torch.load('save/pretrain_vmt.pt').template_vae.state_dict() )
for params in self.model.template_vae.parameters():
params.requires_grad = False
from datetime import datetime
cur_time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
save_path = 'save/temp_{}-{}'.format(self.args.name, cur_time)
os.makedirs(save_path, exist_ok=True)
copyfile('module/vmt.py', os.path.join(save_path, 'vmt.py'))
copyfile('module/vae.py', os.path.join(save_path, 'vae.py'))
self.save_path = save_path
with open(os.path.join(save_path, 'params.json'), 'w') as f:
json.dump(vars(self.args), f)
writer = SummaryWriter('logs/temp_{}-{}'.format(self.args.name, cur_time))
# self.pretrain(1, writer=writer)
i = 0
self.temp = self.args.kl_weight
self.gumbel_temp = np.minimum(self.args.gumbel_max - (self.gumbel_temp * np.exp(-self.gumbel_anneal_rate * i)), 0.00005)
with tqdm(total=args.iterations+1, dynamic_ncols=True) as pbar:
for i in range(args.iterations+1):
self.model.train(), self.D.eval(), self.Q.train()
total_loss, total_kl_loss, reconstruct_loss, cycle_nll_loss, title_cycle_nll, tmp_cycle_nll, g_loss = self.step(i)
self.model.eval(), self.D.train(), self.Q.eval()
d_loss, logit_loss, mle_loss = self.dis_step(i)
if i % self.args.bleu_iter == 0:
self.model.eval()
self.calculate_bleu(writer, i)
self.model.train()
                if i % cfg.adv_log_step == 0 and writer is not None:
writer.add_scalar('G/loss', total_loss, i)
writer.add_scalar('G/kl_loss', total_kl_loss, i)
writer.add_scalar('G/reconstruct_loss', reconstruct_loss, i)
writer.add_scalar('G/cycle_nll_loss', tmp_cycle_nll, i)
writer.add_scalar('G/gan', g_loss, i)
writer.add_scalar('D/d_loss', d_loss, i)
writer.add_scalar('D/logit', logit_loss, i)
writer.add_scalar('D/mle', mle_loss, i)
writer.add_scalar('temp/gumbel', self.gumbel_temp, i)
writer.add_scalar('temp/kl', self.temp, i)
if i % 100 == 0:
self.model.eval()
self.sample_results(writer, i)
self.model.train()
self.gumbel_temp = np.maximum(self.args.gumbel_max ** (self.gumbel_anneal_rate * i), 0.00005)
if i % args.check_iter == 0:
torch.save({
'model': self.model.state_dict(),
# 'amp': amp.state_dict()
}, os.path.join(save_path,'amp_checkpoint_{}.pt'.format(i)))
torch.save({
'gen_opt': self.gen_opt,
'gen_adv_opt': self.gen_adv_opt.state_dict(),
'dis_opt': self.dis_adv_opt,
'D': self.D.state_dict(),
}, os.path.join(save_path,'optimizers.pt'))
pbar.update(1)
pbar.set_description(
'g_loss: %.4f, c_loss: %.4f, cycle: %.4f' % (total_loss, reconstruct_loss, tmp_cycle_nll))
def update_temp(self, i, N):
# temperature = np.maximum( np.exp(-self.args.anneal_rate * i), self.args.temperature_min)
return get_fixed_temperature(self.args.temperature, i, N, cfg.temp_adpt)
if __name__ == "__main__":
import argparse
# args.mem_slots, args.num_heads, args.head_size, args.gen_embed_dim, args.gen_hidden_dim
# args.dis_embed_dim, args.max_seq_len, args.num_rep
# args.gen_lr args.gen_adv_lr, args.dis_lr
parser = argparse.ArgumentParser(description='KKDay users')
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--pre-batch-size', type=int, default=48)
parser.add_argument('--clip-norm', type=float, default=1.0)
parser.add_argument('--pretrain-epochs', type=int, default=100)
parser.add_argument('--pretrain-embeddings', type=str, default=None)
parser.add_argument('--iterations', type=int, default=10000)
parser.add_argument('--check-iter', type=int, default=1000, help='checkpoint every 1k')
parser.add_argument('--bleu-iter', type=int, default=400, help='bleu evaluation step')
parser.add_argument('--pretrain-gen', type=str, default=None)
parser.add_argument('--gen-steps', type=int, default=1)
parser.add_argument('--dis-steps', type=int, default=1)
parser.add_argument('--tokenize', '-t', type=str, default='word', choices=['word', 'char'])
parser.add_argument('--name', type=str, default='rec_vae_gan')
parser.add_argument('--tmp-latent-dim', type=int, default=16)
parser.add_argument('--tmp-cat-dim', type=int, default=10)
parser.add_argument('--desc-latent-dim', type=int, default=32)
parser.add_argument('--user-latent-dim', type=int, default=64)
parser.add_argument('--gen-embed-dim', type=int, default=128)
parser.add_argument('--dis-embed-dim', type=int, default=64)
parser.add_argument('--dis-num-layers', type=int, default=5)
parser.add_argument('--max-seq-len', type=int, default=64)
parser.add_argument('--num-rep', type=int, default=64)
parser.add_argument('--temperature-min', type=float, default=0.01)
parser.add_argument('--temperature', type=float, default=1)
parser.add_argument('--gumbel-max', type=float, default=5)
parser.add_argument('--anneal-rate', type=float, default=0.00002)
parser.add_argument('--gen-lr', type=float, default=0.0001)
parser.add_argument('--gen-adv-lr', type=float, default=0.0001)
parser.add_argument('--dis-lr', type=float, default=0.001)
parser.add_argument('--grad-penalty', type=str2bool, nargs='?',
default=False, help='Apply gradient penalty')
parser.add_argument('--full-text', type=str2bool, nargs='?',
default=False, help='Dataset return full max length')
parser.add_argument('--update-latent', type=str2bool, nargs='?',
default=True, help='Update latent assignment every epoch?')
parser.add_argument('--biset',type=str2bool, nargs='?',
default=False, help='Use BiSET module to fuse article/template feature')
parser.add_argument('--dis-weight', type=float, default=0.1)
parser.add_argument('--kl-weight', type=float, default=1.0)
parser.add_argument('--opt-level', type=str, default='O1')
parser.add_argument('--cycle-weight', type=float, default=0.2)
parser.add_argument('--re-weight', type=float, default=0.5)
parser.add_argument('--gp-weight', type=float, default=10)
parser.add_argument('--bin-weight', type=float, default=0.5)
parser.add_argument('--loss-type', type=str, default='rsgan',
choices=['rsgan', 'wasstestein', 'hinge'])
args = parser.parse_args()
trainer = TemplateTrainer(args)
# trainer.pretrain(5)
# trainer.sample_results(None)
# trainer.step(1)
# trainer.dis_step(1)
# trainer.calculate_bleu(None, size=1000)
# trainer.test()
trainer.train()
# trainer.pretrain(args.pretrain_epochs)
# for _ in range(10000):
# total_loss, total_kl_loss, reconstruct_loss, cycle_nll_loss, title_cycle_nll, tmp_cycle_nll = trainer.step()
# print(total_loss, total_kl_loss, reconstruct_loss, cycle_nll_loss, title_cycle_nll, tmp_cycle_nll)
| null |
cli/vae_info_gan_trainer.py
|
vae_info_gan_trainer.py
|
py
| 31,779 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "dataset.KKDayUser",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "dataset.KKDayUser",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "dataset.KKDayUser",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "module.vmt.VMT",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "module.vmt.TemplateD",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "module.vmt.QHead",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "dataset.seq_collate",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "dataset.seq_collate",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "torch.nn.NLLLoss",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "constant.Constants.PAD",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "constant.Constants",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "module.vae.GaussianKLLoss",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "dataset.KKDayUser",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "dataset.KKDayUser",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "dataset.seq_collate",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "dataset.seq_collate",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "torch.nn.NLLLoss",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "constant.Constants.PAD",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "constant.Constants",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "dataset.vocab_size",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "numpy.maximum",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.log",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "dataset.vocab_size",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "numpy.maximum",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.log",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "constant.Constants.EOS",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "constant.Constants",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "dataset.idx2word",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "config.CUDA",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "config.CUDA",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.one_hot",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "torch.argmax",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "torch.argmax",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "utils.get_losses",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "config.CUDA",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "config.CUDA",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.one_hot",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "torch.argmax",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "torch.argmax",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "utils.get_losses",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "config.clip_norm",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "constant.Constants.EOS",
"line_number": 409,
"usage_type": "attribute"
},
{
"api_name": "constant.Constants",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "constant.Constants.EOS",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "constant.Constants",
"line_number": 416,
"usage_type": "name"
},
{
"api_name": "constant.Constants.EOS",
"line_number": 423,
"usage_type": "attribute"
},
{
"api_name": "constant.Constants",
"line_number": 423,
"usage_type": "name"
},
{
"api_name": "constant.Constants.EOS",
"line_number": 430,
"usage_type": "attribute"
},
{
"api_name": "constant.Constants",
"line_number": 430,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "dataset.seq_collate",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "config.CUDA",
"line_number": 458,
"usage_type": "attribute"
},
{
"api_name": "constant.Constants.EOS",
"line_number": 477,
"usage_type": "attribute"
},
{
"api_name": "constant.Constants",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "constant.Constants.EOS",
"line_number": 485,
"usage_type": "attribute"
},
{
"api_name": "constant.Constants",
"line_number": 485,
"usage_type": "name"
},
{
"api_name": "nltk.translate.bleu_score.sentence_bleu",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "nltk.translate.bleu_score.SmoothingFunction",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 496,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 496,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 500,
"usage_type": "attribute"
},
{
"api_name": "config.CUDA",
"line_number": 517,
"usage_type": "attribute"
},
{
"api_name": "config.CUDA",
"line_number": 531,
"usage_type": "attribute"
},
{
"api_name": "numpy.maximum",
"line_number": 537,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 537,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 541,
"usage_type": "call"
},
{
"api_name": "fasttext.load_model",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 560,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 565,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 565,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 567,
"usage_type": "call"
},
{
"api_name": "shutil.copyfile",
"line_number": 568,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 568,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 568,
"usage_type": "attribute"
},
{
"api_name": "shutil.copyfile",
"line_number": 569,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 569,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 569,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 571,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "tensorboardX.SummaryWriter",
"line_number": 573,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 581,
"usage_type": "call"
},
{
"api_name": "config.adv_log_step",
"line_number": 594,
"usage_type": "attribute"
},
{
"api_name": "numpy.maximum",
"line_number": 613,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 617,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 620,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 620,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 622,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 627,
"usage_type": "attribute"
},
{
"api_name": "utils.get_fixed_temperature",
"line_number": 635,
"usage_type": "call"
},
{
"api_name": "config.temp_adpt",
"line_number": 635,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 644,
"usage_type": "call"
},
{
"api_name": "utils.str2bool",
"line_number": 681,
"usage_type": "name"
},
{
"api_name": "utils.str2bool",
"line_number": 683,
"usage_type": "name"
},
{
"api_name": "utils.str2bool",
"line_number": 685,
"usage_type": "name"
},
{
"api_name": "utils.str2bool",
"line_number": 687,
"usage_type": "name"
},
{
"api_name": "{'KKDayUser': 'dataset.KKDayUser', 'seq_collate': 'dataset.seq_collate', 'datetime': 'datetime.datetime'}",
"line_number": 701,
"usage_type": "call"
}
] |
30709289
|
import discord
from discord.ext import commands
import asyncio
import random
import re
from urllib.parse import quote as uriquote
import sqlite3
FACES = [" ͡° ͜ʖ ͡°", " ͡° ʖ̯ ͡°", " ͠° ͟ʖ ͡°", " ͡ᵔ ͜ʖ ͡ᵔ", " . •́ _ʖ •̀ .", " ఠ ͟ʖ ఠ", " ͡ಠ ʖ̯ ͡ಠ",
" ಠ ʖ̯ ಠ", " ಠ ͜ʖ ಠ", " ͡• ͜ʖ ͡• ", " ・ิ ͜ʖ ・ิ", " ͡ ͜ʖ ͡ ", "≖ ͜ʖ≖", "ʘ ʖ̯ ʘ", "ʘ ͟ʖ ʘ",
"ʘ ͜ʖ ʘ", "* ^ ω ^", "´ ∀ ` *", "◕‿◕。", "≧▽≦", "o^▽^o", "⌒▽⌒", "*⌒―⌒*",
"・∀・", "´。• ω •。`", " ̄ω ̄", "°ε° ", "o・ω・o", "@^◡^", "*・ω・", "^人^", "o´▽`o",
"*´▽`*", " ゚^∀^゚", " ´ ω ` ", "≧◡≦", "´• ω •`", "⌒ω⌒", "*^‿^*", "◕‿◕", "*≧ω≦*",
"。•́‿•̀。", "ー_ー", "´ー` ", "‘~` ", "  ̄д ̄", " ̄ヘ ̄", " ̄~ ̄ ", "ˇヘˇ", "︶▽︶",
"ツ", " ´ д ` ", "︶︿︶", " ˘ 、 ˘ ", " ˘_˘ ", " ᐛ ", "・_・", "⇀_⇀", "¬_¬" ]
SHRUG = r"¯\\\_({})\_/¯"
class Chat(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.custom_command_conn = sqlite3.connect("customcommands.sqlite")
cursor = self.custom_command_conn.cursor()
self.custom_command_cursor = cursor
result = cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='commands';").fetchone()
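        # First run: create the table. cmd is UNIQUE ON CONFLICT REPLACE, so the
        # plain INSERT in addcmd also acts as an edit of an existing command.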
if not result:
cursor.execute("CREATE TABLE 'commands' ('cmd' TEXT UNIQUE ON CONFLICT REPLACE, 'output' TEXT, 'owner' TEXT);")
self.custom_command_conn.commit()
REPOST = ['\N{REGIONAL INDICATOR SYMBOL LETTER R}',
'\N{REGIONAL INDICATOR SYMBOL LETTER E}',
'\N{REGIONAL INDICATOR SYMBOL LETTER P}',
'\N{REGIONAL INDICATOR SYMBOL LETTER O}',
'\N{REGIONAL INDICATOR SYMBOL LETTER S}',
'\N{REGIONAL INDICATOR SYMBOL LETTER T}',]
@commands.Cog.listener()
async def on_reaction_add(self, reaction, user):
if reaction.emoji == '\N{BLACK UNIVERSAL RECYCLING SYMBOL}\N{VARIATION SELECTOR-16}':
for letter in self.REPOST:
await reaction.message.add_reaction(letter)
@commands.command(name='qp')
async def quickpoll(self, ctx):
"""Add a Checkmark and X to your post for a quick yes-no poll"""
await ctx.message.add_reaction('\N{WHITE HEAVY CHECK MARK}')
await ctx.message.add_reaction('\N{CROSS MARK}')
@commands.command()
async def poll(self, ctx, *, msg):
"""Create a poll using reactions.
!poll 1. cats 2. dogs 3. birds
!poll what's for lunch?
1) pizza
2) chicken
3) starvation
"""
options = re.split("(\d\.|\d\))", msg)
emoji = ['1⃣', '2⃣', '3⃣', '4⃣', '5⃣', '6⃣', '7⃣', '8⃣', '9⃣']
for opt in options[1:]:
            try:
                number = int(opt[0])
                await ctx.message.add_reaction(emoji[number-1])
            except (ValueError, IndexError, discord.HTTPException):
                # Not a numbered option, or the reaction failed; skip it
                pass
@commands.command()
async def translate(self, ctx, *, phrase: str):
"""Translate short phrases using google translate
Optionally specify language code such as `!translate en-es cat`"""
langs = re.search(r"(\w{2})-(\w{2})", phrase[0:5])
if langs:
sl = langs.group(1)
tl = langs.group(2)
phrase = phrase[6:]
else:
sl = "auto"
tl = "en"
url = "https://translate.googleapis.com/translate_a/single"
params = {'client': 'gtx', 'sl': sl, 'tl': tl, 'dt': 't', "q": phrase}
ua = "Mozilla/5.0 (X11; CrOS x86_64 12239.19.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.38 Safari/537.36"
headers = {'User-Agent': ua}
async with self.bot.session.get(url, headers=headers, params=params) as resp:
result = await resp.json()
await ctx.send("{} ({}): {}".format(result[0][0][1], result[2], result[0][0][0]))
@commands.Cog.listener()
async def on_message(self, message):
self.bot.logger.debug(message)
self.bot.logger.debug(message.content)
out = ''
prefix = self.bot.command_prefix
lower = message.content.lower()
if lower.startswith('bot '):
decide = self.decider(message.clean_content[4:])
if decide:
out = f"{message.author.mention}: {decide}"
elif "shrug" in lower:
out = self.shrug()
elif message.content[:1] in prefix:
cmd = lower[1:].split(" ")[0]
out = await self.custom_command(cmd)
if out:
ctx = await self.bot.get_context(message, cls=self.bot.utils.MoreContext)
await ctx.send(out)
def shrug(self):
return SHRUG.format(random.choice(FACES))
@commands.command(name="bot")
async def decide(self, ctx, *, line:str):
"""Decide things"""
out = f"{ctx.author.mention}: {self.decider(line)}"
await ctx.send(out)
def decider(self, msg):
things = re.split(", or |, | or ", msg, flags=re.IGNORECASE)
if len(things) > 1:
return random.choice(things).strip()
async def custom_command(self, command):
c = self.custom_command_cursor
result = c.execute("SELECT output FROM commands WHERE cmd = (?)", [command.lower()]).fetchone()
if not result:
return
else:
return result[0].strip()
@commands.command()
@commands.has_role('Admins')
async def addcmd(self, ctx, cmd, *, output: str):
"""Adds a custom command to the bot that will output whatever is in the <output> field"""
#Currently hard insert so can be used to edit too
owner = str(ctx.author)
c = self.custom_command_cursor
conn = self.custom_command_conn
c.execute("INSERT INTO commands VALUES (?,?,?)", (cmd.lower(), output, owner))
conn.commit()
@commands.command()
@commands.has_role('Admins')
async def delcmd(self, ctx, cmd: str):
c = self.custom_command_cursor
conn = self.custom_command_conn
c.execute("DELETE FROM commands WHERE cmd = (?)", [cmd.lower()])
conn.commit()
def setup(bot):
bot.add_cog(Chat(bot))
| null |
modules/chat.py
|
chat.py
|
py
| 6,517 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "discord.ext.commands.Cog",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog.listener",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "re.split",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "re.search",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Cog.listener",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "re.split",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_role",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_role",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 152,
"usage_type": "name"
}
] |
554537136
|
# coding: utf-8
from __future__ import unicode_literals
from sqlalchemy.orm import joinedload, joinedload_all
from clld.db.meta import DBSession
from clld.db.util import get_distinct_values
from clld.db.models.common import Value, Contribution, ValueSet, Parameter, Language
from clld.web.util.helpers import external_link, linked_references
from clld.web.datatables.base import Col, IdCol, LinkCol, LinkToMapCol, DataTable
from clld.web.datatables.language import Languages
from clld.web.datatables.parameter import Parameters
from clld.web.datatables.value import Values
from clld.web.datatables.contribution import Contributions
from clld.web.datatables.source import Sources
from clld_glottologfamily_plugin.datatables import MacroareaCol, FamilyLinkCol
from clld_glottologfamily_plugin.models import Family
from lexibank.models import (
LexibankLanguage, Counterpart, Concept, Provider, LexibankSource,
CounterpartReference, Cognateset,
)
class LexibankSources(Sources):
def base_query(self, query):
query = Sources.base_query(self, query)
query = query.join(LexibankSource.provider).options(joinedload(LexibankSource.provider))
return query
def col_defs(self):
cols = Sources.col_defs(self)
provider = LinkCol(
self,
'provider',
choices=get_distinct_values(Provider.name),
model_col=Provider.name,
get_object=lambda i: i.provider)
return cols[:-1] + [provider]
class MaybeLinkCol(LinkCol):
def format(self, item):
obj = self.get_obj(item)
if obj:
return LinkCol.format(self, item)
return ''
class RefsCol(Col):
__kw__ = dict(bSearchable=False, bSortable=False)
def format(self, item):
return linked_references(self.dt.req, item)
class Counterparts(Values):
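    # base_query specialises for the datatable context: per-parameter (joins
    # language and family), per-language, per-contribution, or the plain listing.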
def base_query(self, query):
query = query.join(ValueSet).options(
joinedload(Value.valueset),
joinedload_all(Counterpart.references, CounterpartReference.source)
)
if self.language:
query = query \
.join(ValueSet.parameter) \
.join(ValueSet.contribution) \
.options(
joinedload(Value.valueset, ValueSet.contribution),
joinedload(Value.valueset, ValueSet.parameter))
return query.filter(ValueSet.language_pk == self.language.pk)
if self.parameter:
query = query \
.join(ValueSet.language) \
.outerjoin(LexibankLanguage.family) \
.options(joinedload_all(
Value.valueset, ValueSet.language, LexibankLanguage.family))
return query.filter(ValueSet.parameter_pk == self.parameter.pk)
if self.contribution:
query = query.join(ValueSet.parameter)
return query.filter(ValueSet.contribution_pk == self.contribution.pk)
return query \
.join(ValueSet.parameter)\
.join(ValueSet.language)\
.options(
joinedload(Value.valueset, ValueSet.parameter),
joinedload(Value.valueset, ValueSet.language),
)
def col_defs(self):
if self.parameter:
return [
LinkCol(self, 'form', model_col=Counterpart.name),
LinkCol(
self,
'language',
model_col=LexibankLanguage.name,
get_object=lambda i: i.valueset.language),
MaybeLinkCol(
self,
'family',
model_col=Family.name,
get_object=lambda i: i.valueset.language.family),
Col(self, 'variety', model_col=Counterpart.variety_name),
#Col(self, 'loan', model_col=Counterpart.loan),
RefsCol(self, 'source'),
]
if self.language:
return [
LinkCol(self, 'form', model_col=Counterpart.name),
LinkCol(
self,
'concept',
model_col=Concept.name,
get_object=lambda i: i.valueset.parameter),
Col(self, 'variety', model_col=Counterpart.variety_name),
LinkCol(
self,
'provider',
model_col=Contribution.name,
get_object=lambda i: i.valueset.contribution),
RefsCol(self, 'source'),
]
return [
LinkCol(self, 'form', model_col=Value.name),
Col(self, 'context', model_col=Counterpart.context),
LinkCol(
self,
'language',
model_col=Language.name,
get_object=lambda i: i.valueset.language),
LinkCol(
self,
'concept',
model_col=Parameter.name,
get_object=lambda i: i.valueset.parameter),
]
#class FeatureIdCol(IdCol):
# def search(self, qs):
# if self.model_col:
# return self.model_col.contains(qs)
# def order(self):
# return Feature.sortkey_str, Feature.sortkey_int
class LanguageIdCol(LinkCol):
def get_attrs(self, item):
return dict(label=item.id)
class LexibankLanguages(Languages):
__constraints__ = [Contribution]
def base_query(self, query):
if self.contribution:
sq = DBSession.query(ValueSet.language_pk)\
.filter(ValueSet.contribution_pk == self.contribution.pk)\
.distinct()\
.subquery()
query = query.filter(LexibankLanguage.pk.in_(sq))
return query.outerjoin(Family).options(joinedload(LexibankLanguage.family))
def col_defs(self):
return [
LanguageIdCol(self, 'id'),
LinkCol(self, 'name'),
LinkToMapCol(self, 'm'),
Col(self,
'latitude',
sDescription='<small>The geographic latitude</small>'),
Col(self,
'longitude',
sDescription='<small>The geographic longitude</small>'),
MacroareaCol(self, 'macroarea', LexibankLanguage),
FamilyLinkCol(self, 'family', LexibankLanguage),
]
class ConcepticonLink(Col):
__kw__ = {'bSearchable': False, 'bSortable': False}
def format(self, item):
return external_link(item.concepticon_url)
class Concepts(Parameters):
def col_defs(self):
return [
IdCol(self, 'id'),
LinkCol(self, 'name', sTitle='Concept'),
Col(self, 'Languages', model_col=Concept.representation),
Col(self, 'semantic_field', model_col=Concept.semanticfield, choices=get_distinct_values(Concept.semanticfield)),
ConcepticonLink(self, 'Concepticon'),
]
class Providers(Contributions):
def col_defs(self):
return [
IdCol(self, 'id'),
LinkCol(self, 'name'),
Col(self, 'cite', model_col=Contribution.description),
Col(self, 'language_count', sTitle='# languages', model_col=Provider.language_count),
Col(self, 'parameter_count', sTitle='# concepts', model_col=Provider.parameter_count),
Col(self,
'lexeme_count',
sTitle='# lexemes',
model_col=Provider.lexeme_count,
format=lambda i: '{:,}'.format(i.lexeme_count)),
Col(self,
'synonymy',
sDescription=Provider.synonym_index.doc,
model_col=Provider.synonym_index,
format=lambda i: '{:.3f}'.format(i.synonym_index))
]
class ProviderCol(LinkCol):
def __init__(self, dt, name, **kw):
kw['model_col'] = Contribution.name
kw['choices'] = [(p.id, p.name) for p in DBSession.query(Provider)]
LinkCol.__init__(self, dt, name, **kw)
def search(self, qs):
return Contribution.id == qs
class Cognatesets(DataTable):
def base_query(self, query):
return query.join(Cognateset.contribution)\
.options(joinedload(Cognateset.contribution))
def col_defs(self):
return [
IdCol(self, 'id'),
LinkCol(self, 'name'),
Col(self, 'cognates', model_col=Cognateset.representation),
ProviderCol(
self,
'provider',
get_object=lambda i: i.contribution),
]
def includeme(config):
config.register_datatable('cognatesets', Cognatesets)
config.register_datatable('languages', LexibankLanguages)
config.register_datatable('contributions', Providers)
config.register_datatable('parameters', Concepts)
config.register_datatable('values', Counterparts)
config.register_datatable('sources', LexibankSources)
| null |
lexibank/datatables.py
|
datatables.py
|
py
| 8,987 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "clld.web.datatables.source.Sources",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.source.Sources.base_query",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.source.Sources",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "lexibank.models.LexibankSource.provider",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.LexibankSource",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.joinedload",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.source.Sources.col_defs",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.source.Sources",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "clld.db.util.get_distinct_values",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Provider.name",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Provider",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "lexibank.models.Provider.name",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Provider",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol.format",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "clld.web.util.helpers.linked_references",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.value.Values",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 61,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.orm.joinedload",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Value.valueset",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Value",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.joinedload_all",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Counterpart.references",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Counterpart",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "lexibank.models.CounterpartReference.source",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.CounterpartReference",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.parameter",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.contribution",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.joinedload",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Value.valueset",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Value",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.contribution",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.joinedload",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Value.valueset",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Value",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.parameter",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.language_pk",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.language",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "lexibank.models.LexibankLanguage.family",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.LexibankLanguage",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.joinedload_all",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Value.valueset",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Value",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.language",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "lexibank.models.LexibankLanguage.family",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.LexibankLanguage",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.parameter_pk",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.parameter",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.contribution_pk",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.parameter",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.language",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.joinedload",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Value.valueset",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Value",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.parameter",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.joinedload",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Value.valueset",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Value",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.language",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Counterpart.name",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Counterpart",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "lexibank.models.LexibankLanguage.name",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.LexibankLanguage",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "clld_glottologfamily_plugin.models.Family.name",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "clld_glottologfamily_plugin.models.Family",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Counterpart.variety_name",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Counterpart",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Counterpart.name",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Counterpart",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Concept.name",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Concept",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Counterpart.variety_name",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Counterpart",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Contribution.name",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Contribution",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Value.name",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Value",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Counterpart.context",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Counterpart",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Language.name",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Language",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Parameter.name",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Parameter",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.language.Languages",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.Contribution",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "clld.db.meta.DBSession.query",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "clld.db.meta.DBSession",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.language_pk",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.ValueSet.contribution_pk",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.ValueSet",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "lexibank.models.LexibankLanguage.pk.in_",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "lexibank.models.LexibankLanguage.pk",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.LexibankLanguage",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "clld_glottologfamily_plugin.models.Family",
"line_number": 169,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.orm.joinedload",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "lexibank.models.LexibankLanguage.family",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.LexibankLanguage",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.LinkToMapCol",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "clld_glottologfamily_plugin.datatables.MacroareaCol",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "lexibank.models.LexibankLanguage",
"line_number": 182,
"usage_type": "argument"
},
{
"api_name": "clld_glottologfamily_plugin.datatables.FamilyLinkCol",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "lexibank.models.LexibankLanguage",
"line_number": 183,
"usage_type": "argument"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "clld.web.util.helpers.external_link",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.parameter.Parameters",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.IdCol",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Concept.representation",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Concept",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Concept.semanticfield",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Concept",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "clld.db.util.get_distinct_values",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.contribution.Contributions",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.IdCol",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "clld.db.models.common.Contribution.description",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Contribution",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Provider.language_count",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Provider",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Provider.parameter_count",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Provider",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Provider.lexeme_count",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Provider",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Provider.synonym_index",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Provider",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "lexibank.models.Provider.synonym_index",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Provider",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.Contribution.name",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Contribution",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "clld.db.meta.DBSession.query",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Provider",
"line_number": 229,
"usage_type": "argument"
},
{
"api_name": "clld.db.meta.DBSession",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.LinkCol.__init__",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "clld.db.models.common.Contribution.id",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "clld.db.models.common.Contribution",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.DataTable",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "lexibank.models.Cognateset.contribution",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Cognateset",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.joinedload",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Cognateset.contribution",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Cognateset",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "clld.web.datatables.base.IdCol",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.LinkCol",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "clld.web.datatables.base.Col",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "lexibank.models.Cognateset.representation",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "lexibank.models.Cognateset",
"line_number": 245,
"usage_type": "name"
}
] |
650696173
|
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from application import settings
from core.models import WatchableModel, User
from event.models import add_event_for_object
from .models import ModelWithAuthor, Like
from .tasks import send_confirmation_email_task
from django.db import transaction
def model_with_author_post_save(instance, created=False, *args, **kwargs):
if created:
instance.author.objects_count += 1
instance.author.save()
@receiver(post_save, sender=Like)
def like_post_save(instance, created, *args, **kwargs):
if created:
instance.object.likes_count += 1
instance.object.save()
@receiver(pre_delete, sender=Like)
def like_pre_delete(instance, *args, **kwargs):
instance.object.likes_count -= 1
    if instance.object.likes_count < 0:
instance.object.likes_count = 0
instance.object.save()
def watchable_object_post_save(instance, created, *args, **kwargs):
if instance.is_tracked():
# we create new event on each object edit
add_event_for_object(instance, created=created)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
@receiver(post_save, sender=User)
def confirm_user_email(instance, created, *args, **kwargs):
if created:
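        # on_commit defers the email task until the surrounding transaction commits,
        # so the worker can never load a user row that might still roll back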
transaction.on_commit(lambda: send_confirmation_email_task.apply_async([instance.id], {}))
# for user in User.objects.all():
# Token.objects.get_or_create(user=user)
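# Wire the signals dynamically so every concrete subclass is covered without per-model code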
for model in WatchableModel.__subclasses__():
post_save.connect(watchable_object_post_save, model)
for model in ModelWithAuthor.__subclasses__():
post_save.connect(model_with_author_post_save, model)
| null |
core/signals.py
|
signals.py
|
py
| 2,005 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.dispatch.receiver",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "models.Like",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.pre_delete",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "models.Like",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "event.models.add_event_for_object",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "rest_framework.authtoken.models.Token.objects.create",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "rest_framework.authtoken.models.Token.objects",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.authtoken.models.Token",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "application.settings.AUTH_USER_MODEL",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "application.settings",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.db.transaction.on_commit",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.db.transaction",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "tasks.send_confirmation_email_task.apply_async",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "tasks.send_confirmation_email_task",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 48,
"usage_type": "argument"
},
{
"api_name": "core.models.User",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "core.models.WatchableModel.__subclasses__",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "core.models.WatchableModel",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "django.db.models.signals.post_save.connect",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "models.ModelWithAuthor.__subclasses__",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "models.ModelWithAuthor",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "django.db.models.signals.post_save.connect",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 61,
"usage_type": "name"
}
] |
499609773
|
# -------------------------------------------------------
# Merge Sorted Array - https://leetcode.com/problems/merge-sorted-array/
# -------------------------------------------------------
# Author: Arshad Mehmood
# Github: https://github.com/arshad115
# Blog: https://arshadmehmood.com
# LinkedIn: https://www.linkedin.com/in/arshadmehmood115
# Date : 2020-07-22
# Project: 100-days-of-leetcode
# -------------------------------------------------------
from typing import List
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
        j, k = 0, 0
        temp = []
        # Standard two-pointer merge: take the smaller head element each step
        while j < m and k < n:
            if nums1[j] <= nums2[k]:
                temp.append(nums1[j])
                j += 1
            else:
                temp.append(nums2[k])
                k += 1
        # At most one of the two lists still has elements; append its remainder
        temp.extend(nums1[j:m])
        temp.extend(nums2[k:n])
        nums1[:] = temp
solution = Solution()
nums1 = [1, 2, 3, 5, 7, 15]
m = 6
nums2 = [2, 3 ,4 ,5]
n = 4
solution.merge(nums1, m, nums2, n)
print(nums1)
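# Expected output for the demo input above: [1, 2, 2, 3, 3, 4, 5, 5, 7, 15]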
| null |
codes/2020-07-22-merge-sorted-array.py
|
2020-07-22-merge-sorted-array.py
|
py
| 1,908 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.List",
"line_number": 17,
"usage_type": "name"
}
] |
535961985
|
"""
Python Makefile System
"""
import os
import sys
import argparse
from functools import wraps, partial
from distutils import log
from pake.config import PAKEFILE_NAME
from pake.globals import app, context
from pake.exceptions import PakeError
class Task(object):
"""
    A decorator that makes a function a sub-command of pake.
    A task function should take at least one argument, 'task'.
"""
    def __init__(self, prerequisites=None, target=None, default=False):
        self.target = target
        # Avoid a shared mutable default argument
        self.prerequisites = prerequisites if prerequisites is not None else []
self.default = default
self.help = None
self.options = []
self.invoked = False
def __call__(self, func):
if isinstance(func, Option):
self.options = func.options
func = func.func
if self.target is None:
self.target = func.__name__
if func.__doc__:
self.help = func.__doc__.strip()
else:
self.help = self.target
parser = app.subparser.add_parser(self.target, help=self.help)
for args, kwargs in self.options:
parser.add_argument(*args, **kwargs)
self.parser = parser
helper = partial(func, self)
self.func = wraps(func)(helper)
context.add_task(self)
return self
def __repr__(self):
return self.target
    def execute(self, argv=None):
        argv = argv or []
        self.option, self.argv = self.parser.parse_known_args(argv[1:])
self.func()
self.invoked = True
def task(*args, **kwargs):
return Task(*args, **kwargs)
class Option(object):
def __init__(self, *args, **kwargs):
self.options = [(args, kwargs)]
def __call__(self, func):
if isinstance(func, Option):
self.options += func.options
self.func = func.func
else:
self.func = func
return self
def option(*args, **kwargs):
return Option(*args, **kwargs)
class Rule(object):
def __init__(self, target_suffix, source_suffix):
self.target_suffix = target_suffix[1:]
self.source_suffix = source_suffix[1:]
    def __call__(self, func):
        self.func = func
        context.add_rule(self)
        # Return self so the decorated name is not rebound to None
        return self
def rule(target, source):
return Rule(target, source)
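# A minimal usage sketch (hypothetical task, option, and rule names):
#
#     @task()
#     @option('--verbose', action='store_true')
#     def build(task):
#         """Build the project"""
#         if task.option.verbose:
#             print('building...')
#
#     @rule('.o', '.c')
#     def compile_c(rule):
#         pass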
| null |
lib/pake/pake/core.py
|
core.py
|
py
| 1,963 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pake.globals.app.subparser.add_parser",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pake.globals.app.subparser",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "pake.globals.app",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pake.globals.context.add_task",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pake.globals.context",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "pake.globals.context.add_rule",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pake.globals.context",
"line_number": 84,
"usage_type": "name"
}
] |
437741559
|
"""
sqlite3 context manager and try-except error handling
"""
import sqlite3
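# Note: "with db:" commits on success and rolls back on error, but it does NOT
# close the connection, hence the explicit db.close() in main().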
def create_table(db):
try:
with db:
cur = db.cursor()
cur.execute('create table phones (brand text, version int)')
except sqlite3.Error as e:
print(f'error creating table because {e}')
def add_test_data(db):
try:
with db:
cur = db.cursor()
cur.execute('insert into phones values ("Android", 5)')
cur.execute('insert into phones values ("iPhone", 6)')
except sqlite3.Error:
print('Error adding rows')
def print_all_data(db):
# Execute a query. Do not need a context manager, as no changes are being made to the DB
try:
cur = db.cursor() # Need a cursor object to perform operations
for row in cur.execute('select * from phones'):
print(row)
except sqlite3.Error as e:
print(f'Error selecting data from phones table because {e}')
def delete_table(db):
try:
with db:
cur = db.cursor()
cur.execute('drop table phones') # Delete table
except sqlite3.Error as e:
print(f'Error deleting phones table because {e}')
def main():
db = None
try:
db = sqlite3.connect('my_first_db.db')
except sqlite3.Error as e:
print(f'Unable to connect to database because {e}.')
if db is not None:
create_table(db)
add_test_data(db)
print_all_data(db)
delete_table(db)
try:
db.close()
        except sqlite3.Error as e:
            print(f'Error closing database because {e}')
if __name__ == '__main__':
main()
| null |
sqlite/db_context_manager_error_handling.py
|
db_context_manager_error_handling.py
|
py
| 1,673 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlite3.Error",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.Error",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.Error",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.Error",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sqlite3.Error",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.Error",
"line_number": 67,
"usage_type": "attribute"
}
] |
104167045
|
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
import cv2
import shy
import os.path as osp
from train import Net
shy.err_hook()
CUR_DIR = osp.dirname(__file__)
DATA_DIR = osp.join(CUR_DIR, 'data')
WEIGHT_PATH = osp.join(CUR_DIR, 'grad-cam-weight.pt')
if not osp.isfile(WEIGHT_PATH):
print('Use pretrained weight')
shy.download_url('https://www.dropbox.com/s/9c3w8aenwpw4lf7/grad-cam-weight.pt?dl=1',
WEIGHT_PATH)
net = Net()
net.load_state_dict(torch.load(WEIGHT_PATH))
net.eval().cuda()
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(DATA_DIR, train=False, transform=transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])),
batch_size=1)
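# batch_size=1: each forward/backward pass below produces a CAM for a single image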
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def tensor2img(t):
img = t[0].permute(1, 2, 0).cpu().numpy()
img *= np.array([0.229, 0.224, 0.225])
img += np.array([0.485, 0.456, 0.406])
img *= 255
return img.astype(np.uint8)
def vis(cam):
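    # 'data' is the module-level batch assigned in the evaluation loop below;
    # Python resolves the global at call time, so this works once the loop runs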
cam = cv2.resize(cam, dsize=(224, 224), interpolation=cv2.INTER_LINEAR)
plt.subplot(131)
plt.imshow(tensor2img(data))
plt.subplot(132)
plt.imshow(cam, cmap='jet', alpha=0.2)
plt.subplot(133)
plt.imshow(tensor2img(data))
plt.imshow(cam, cmap='jet', alpha=0.2)
plt.show()
# calc gradCAM
def get_gradCAM(module, grad_input, grad_output):
    A = grad_output[0]             # gradient of the score w.r.t. this layer's output
    a = A.mean(dim=3).mean(dim=2)  # global average pool -> one weight per channel
    b, c = a.shape
    L = a.reshape(b, c, 1, 1) * A  # channel-weighted maps (this simplified variant
                                   # weights the gradient maps themselves; canonical
                                   # Grad-CAM weights the forward activations)
    L = F.relu(L.sum(dim=1))       # sum over channels, keep positive evidence only
    L = L.detach().cpu().numpy()
    L = L / L.max()                # normalise to [0, 1] for display
    Ls.append(L[0])
# register hook at layer which you want to look gradCAM of
net.backbone.layer4[2].conv3.register_backward_hook(get_gradCAM)
for data, target in test_loader:
data = data.cuda()
output = net(data)
pred = output.argmax(dim=1)
output = output[0]
pred = pred[0]
target = target[0]
Ls = []
output[pred.item()].backward()
vis(Ls[0])
| null |
grad-CAM/vis-simple.py
|
vis-simple.py
|
py
| 2,369 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "shy.err_hook",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "shy.download_url",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "train.Net",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_LINEAR",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 72,
"usage_type": "name"
}
] |
506219078
|
"""Code snippets vol-44-snip-220
220-Difference between two dates
stevepython.wordpress.com
Download all snippets so far:
https://wp.me/Pa5TU8-1yg
Requirements:
None
source:
https://gist.github.com/amalgjose/c767a4846d6ecaa3b6d7
"""
from datetime import datetime
from dateutil import relativedelta
##Aug 7 1989 8:10 pm
date_1 = datetime(1989, 8, 7, 20, 10)
##Dec 5 1990 5:20 am
date_2 = datetime(1990, 12, 5, 5, 20)
#This will find the difference between the two dates
difference = relativedelta.relativedelta(date_2, date_1)
years = difference.years
months = difference.months
days = difference.days
hours = difference.hours
minutes = difference.minutes
print()
print(date_1, ' and ', date_2)
print("Difference is %s year, %s months, %s days, %s hours, %s minutes "
%(years, months, days, hours, minutes))
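# Expected output: Difference is 1 years, 3 months, 27 days, 9 hours, 10 minutes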
| null |
Python-code-snippets-201-300/220-Difference between two dates.py
|
220-Difference between two dates.py
|
py
| 856 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "dateutil.relativedelta.relativedelta",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "dateutil.relativedelta",
"line_number": 25,
"usage_type": "name"
}
] |
97397963
|
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.urls import reverse
from django.utils import timezone
from characters.forms import CreateCharacterForm
from characters.models import Character, BasicStats
from powers.models import Power_Full
def create_character(request):
if request.method == 'POST':
form = CreateCharacterForm(request.POST)
if form.is_valid():
stats = BasicStats(stats=form.cleaned_data['stats'])
stats.save()
character = Character(name=form.cleaned_data['name'],
tagline=form.cleaned_data['tagline'],
appearance=form.cleaned_data['appearance'],
player=request.user,
age=form.cleaned_data['age'],
sex=form.cleaned_data['sex'],
concept_summary=form.cleaned_data['concept'],
ambition=form.cleaned_data['ambition'],
pub_date=timezone.now(),
edit_date=timezone.now(),
private=form.cleaned_data['private'],
basic_stats = stats)
character.save()
return HttpResponseRedirect(reverse('characters:characters_view', args=(character.id,)))
        else:
            print(form.errors)
            # Re-render the bound form so validation errors are visible,
            # instead of returning None (which would raise in Django)
            return render(request, 'characters/edit_character.html', {'form': form})
else:
# Build a character form.
form = CreateCharacterForm()
context = {
'form' : form,
}
return render(request, 'characters/edit_character.html', context)
def edit_character(request, character_id):
character = get_object_or_404(Character, id=character_id)
if request.method == 'POST':
if not request.user.has_perm('edit_character', character):
return HttpResponseForbidden()
if character.private and not request.user.has_perm("view_private_character", character):
return HttpResponseForbidden()
form = CreateCharacterForm(request.POST)
if form.is_valid():
if character.private != form.cleaned_data['private']:
for power_full in character.power_full_set.all():
power_full.set_self_and_children_privacy(is_private=form.cleaned_data['private'])
character.name=form.cleaned_data['name']
character.tagline=form.cleaned_data['tagline']
character.appearance=form.cleaned_data['appearance']
character.age=form.cleaned_data['age']
character.sex=form.cleaned_data['sex']
character.concept_summary=form.cleaned_data['concept']
character.ambition=form.cleaned_data['ambition']
character.pub_date=timezone.now()
character.edit_date=timezone.now()
character.private=form.cleaned_data['private']
character.save()
stats = character.basic_stats
stats.stats=form.cleaned_data['stats']
stats.save()
return HttpResponseRedirect(reverse('characters:characters_view', args=(character.id,)))
        else:
            print(form.errors)
            # Re-render the bound form so validation errors are visible,
            # instead of returning None (which would raise in Django)
            return render(request, 'characters/edit_character.html', {'character': character, 'form': form})
else:
# Build a character form.
form = CreateCharacterForm(initial={'name': character.name,
'tagline': character.tagline,
'appearance': character.appearance,
'age': character.age,
'sex': character.sex,
'concept': character.concept_summary,
'ambition': character.ambition,
'private': character.private,
'stats': character.basic_stats.stats,})
context = {
'character': character,
'form' : form,
}
return render(request, 'characters/edit_character.html', context)
def view_character(request, character_id):
character = get_object_or_404(Character, id=character_id)
if character.private and not request.user.has_perm("view_private_character", character):
return HttpResponseForbidden()
context = {
'character': character,
}
return render(request, 'characters/view_character.html', context)
def choose_powers(request, character_id):
character = get_object_or_404(Character, id=character_id)
if not request.user.has_perm('edit_character', character):
return HttpResponseForbidden()
assigned_powers = character.power_full_set.all()
unassigned_powers = request.user.power_full_set.filter(character=None).order_by('-pub_date').all()
context = {
'character': character,
'assigned_powers': assigned_powers,
'unassigned_powers': unassigned_powers,
}
return render(request, 'characters/choose_powers.html', context)
def toggle_power(request, character_id, power_full_id):
character = get_object_or_404(Character, id=character_id)
power_full = get_object_or_404(Power_Full, id=power_full_id)
if not ( request.user.has_perm('edit_character', character) and request.user.has_perm('edit_power_full', power_full)):
return HttpResponseForbidden()
if power_full.character == character:
power_full.character = None
power_full.save()
power_full.set_self_and_children_privacy(is_private=False)
elif not power_full.character:
power_full.character = character
power_full.save()
power_full.set_self_and_children_privacy(is_private=character.private)
return HttpResponseRedirect(reverse('characters:characters_power_picker', args=(character.id,)))
| null |
hgapp/characters/views.py
|
views.py
|
py
| 6,043 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "characters.forms.CreateCharacterForm",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "characters.models.BasicStats",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "characters.models.Character",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "characters.forms.CreateCharacterForm",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "characters.models.Character",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "django.http.HttpResponseForbidden",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseForbidden",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "characters.forms.CreateCharacterForm",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "characters.forms.CreateCharacterForm",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "characters.models.Character",
"line_number": 96,
"usage_type": "argument"
},
{
"api_name": "django.http.HttpResponseForbidden",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "characters.models.Character",
"line_number": 105,
"usage_type": "argument"
},
{
"api_name": "django.http.HttpResponseForbidden",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "characters.models.Character",
"line_number": 118,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "powers.models.Power_Full",
"line_number": 119,
"usage_type": "argument"
},
{
"api_name": "django.http.HttpResponseForbidden",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 130,
"usage_type": "call"
}
] |
222253029
|
import cv2
import numpy as np
num = input("Type 0 or 1 to show image \n")
print(num)
path = "images/sample{}.jpg"
img = cv2.imread(path.format(num))
kernelSize = 5
kernel = np.ones((kernelSize, kernelSize), np.uint8)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
blur_gray = cv2.GaussianBlur(gray,(kernelSize,kernelSize),0)
ret,thresh = cv2.threshold(hsv, 230,255, cv2.THRESH_BINARY)
thresh = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel)
#cv2.imshow("",thresh)
in_range = cv2.inRange(thresh, (0,0,127), (0,0,255))  # renamed to avoid shadowing the built-in range()
edges = cv2.Canny(in_range, 100, 300, apertureSize=3, L2gradient=True)
#cv2.imshow("edge",edges)
closed = cv2.morphologyEx(edges,cv2.MORPH_CLOSE,kernel)
#cv2.imshow("closed",closed)
minLLength = 100
maxLGap = 15
lines = cv2.HoughLinesP(closed, 1, np.pi/180, 100, minLineLength=minLLength, maxLineGap=maxLGap)
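# HoughLinesP args: rho=1 px, theta=pi/180 rad (1 degree), threshold=100 votes;
# it returns None when no segments are found, hence the guard below.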
if lines is not None:
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), (127, 255, 127), 2)
cv2.imshow("lines", img)
cv2.imwrite("output.jpg",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| null |
source.py
|
source.py
|
py
| 1,039 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_CLOSE",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.inRange",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_CLOSE",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv2.HoughLinesP",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.line",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 31,
"usage_type": "call"
}
] |
95312101
|
from collections import namedtuple
from statistics import mean
from functools import wraps
from util import count_syllables
from tagging import AnnotatedDocument, AnnotatedSentence, AnnotatedParagraph
Metric = namedtuple(
'Metric',
('code', 'name', 'description'),
)
class MetricRegistry:
def __init__(self, measured_cls):
self.measured_cls = measured_cls
self._metrics = []
def __iter__(self):
return iter(self._metrics)
def register(self, func, metric):
"""
Register a function as a metric.
It should take a `measured_cls` object and return a number.
"""
func.meta = metric
self._metrics.append(func)
def metric(self, code, name, description=''):
"""
Create a decorator that turns a function into a metric for
`measured_cls`.
The function must take a `measured_cls` object and return a number.
The decorator will add the decorated function to the registry
so it gets picked up by the runner.
"""
def decorator(f):
@wraps(f)
def decorated(obj):
if not isinstance(obj, self.measured_cls):
raise TypeError(
"{} expected type {}, but got {}".format(
f.__name__,
                            self.measured_cls.__name__,
type(obj).__name__
)
)
return f(obj)
self.register(
decorated,
Metric(
code=code,
name=name,
description=description
)
)
return decorated
return decorator
# Registries
document_metrics = MetricRegistry(AnnotatedDocument)
paragraph_metrics = MetricRegistry(AnnotatedParagraph)
sentence_metrics = MetricRegistry(AnnotatedSentence)
# Decorators
document_metric = document_metrics.metric
paragraph_metric = paragraph_metrics.metric
sentence_metric = sentence_metrics.metric
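# Example (illustrative only): registering a custom sentence metric.
#
#   @sentence_metric(code='length:words', name='Word count')
#   def word_count(sentence):
#       return len(sentence.words)  # assumes AnnotatedSentence exposes `.words`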
@paragraph_metric(code='readability:flesch', name='Flesch readability score')
def flesch_score(paragraph):
"""
Readability score
0 = Very difficult
60 = Plain english
100 = Very easy
"""
sentences = paragraph.sentences
num_sentences = len(sentences)
num_words = 0
num_syllables = 0
for sentence in sentences:
for word, pronunciations in sentence.pronunciation_tags:
if pronunciations:
num_syllables += mean(
count_syllables(tag) for tag in pronunciations
)
num_words += 1
else:
# TODO: this is for debugging only
print(word)
return (
206.835
- (1.015 * num_words / num_sentences)
- (84.6 * num_syllables / num_words)
)
| null |
writerlint/metrics.py
|
metrics.py
|
py
| 2,943 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.namedtuple",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tagging.AnnotatedDocument",
"line_number": 66,
"usage_type": "argument"
},
{
"api_name": "tagging.AnnotatedParagraph",
"line_number": 67,
"usage_type": "argument"
},
{
"api_name": "tagging.AnnotatedSentence",
"line_number": 68,
"usage_type": "argument"
},
{
"api_name": "statistics.mean",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "util.count_syllables",
"line_number": 92,
"usage_type": "call"
}
] |
172920357
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
### GT ID: cchen376
### Assignment #1 CS7641 Fall 2019
import pandas as pd
import numpy as np
import time
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA,FastICA
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report,confusion_matrix
from sklearn.metrics.pairwise import pairwise_distances
from matplotlib.ticker import FuncFormatter
from sklearn.random_projection import SparseRandomProjection, GaussianRandomProjection
# explicit imports for estimators used below (may also be re-exported by helperfuncs_hw3)
from sklearn.neural_network import MLPClassifier
from sklearn.mixture import GaussianMixture
import matplotlib
import seaborn as sns
from itertools import product
import warnings
from collections import defaultdict
warnings.filterwarnings('ignore')
from helperfuncs_hw3 import *
import sys
# In[2]:
seed=0
df_elec=pd.read_csv('Data_for_UCI_named.csv')
df_elec['stabf']=np.where(df_elec['stabf']=='unstable',0,1)
y_elec=df_elec['stabf']
X_elec=df_elec.drop(['stabf','stab'],axis=1)
X_elec=StandardScaler().fit_transform(X_elec)
df_def = pd.read_csv('default_data_cleaned.csv')
y_def=df_def['default payment next month']
X_def=df_def.drop(['default payment next month','ID','PAY_0'],axis=1)
X_def_dummies = pd.get_dummies(X_def,columns = ['SEX','EDUCATION','MARRIAGE'],drop_first=[True,True,True])
X_def=StandardScaler().fit_transform(X_def_dummies)
# # ICA
# In[3]:
X_train_elec, X_test_elec, y_train_elec, y_test_elec= train_test_split(X_elec,y_elec,test_size=.2,random_state=seed,stratify=y_elec)
X_train_def, X_test_def, y_train_def, y_test_def= train_test_split(X_def,y_def,test_size=.2,random_state=seed,stratify=y_def)
# In[22]:
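# Sweep ICA component counts: mean absolute kurtosis of the projected features
# serves as a non-Gaussianity proxy (higher suits ICA), plotted against the
# downstream neural-net F1 score on the reduced data.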
kurt_elec = {}
nn_f1_score=[]
dim_list_elec =[i+2 for i in range(X_elec.shape[1]-1)]
for i in dim_list_elec:
ica_elec = FastICA(n_components=i,random_state=seed)
tmp = ica_elec.fit_transform(X_elec)
tmp = pd.DataFrame(tmp)
X_train_elec, X_test_elec, y_train_elec, y_test_elec= train_test_split(tmp,y_elec,test_size=.2,random_state=seed,stratify=y_elec)
clf_elec = MLPClassifier(hidden_layer_sizes= (100, 10))
clf_elec.fit(X_train_elec,y_train_elec)
y_pred_elec=clf_elec.predict(X_test_elec)
nn_f1_score.append(f1_score(y_test_elec,y_pred_elec))
tmp = tmp.kurt(axis=0)
kurt_elec[i] = tmp.abs().mean()
kurt_elec = list(pd.Series(kurt_elec))
plot_DR(x=dim_list_elec,
ax1_data=kurt_elec,
ax2_data=nn_f1_score,
method='ICA',
data='Electrical Grid',
ylabel1='Kurtosis',
ylabel2='F1 Score (Neural Net)',
ax2none=False)
# In[23]:
kurt_def = {}
nn_f1_score=[]
dim_list_def =[i+2 for i in range(X_def.shape[1]-1)]
for i in dim_list_def:
ica_def = FastICA(n_components=i,random_state=seed)
tmp = ica_def.fit_transform(X_def)
tmp = pd.DataFrame(tmp)
X_train_def, X_test_def, y_train_def, y_test_def= train_test_split(tmp,y_def,test_size=.2,random_state=seed,stratify=y_def)
clf_def= MLPClassifier(hidden_layer_sizes= (50,5))
clf_def.fit(X_train_def,y_train_def)
y_pred_def=clf_def.predict(X_test_def)
nn_f1_score.append(f1_score(y_test_def,y_pred_def))
tmp = tmp.kurt(axis=0)
kurt_def[i] = tmp.abs().mean()
kurt_def = list(pd.Series(kurt_def))
plot_DR(x=dim_list_def,
ax1_data=kurt_def,
ax2_data=nn_f1_score,
method='ICA',
data='Default Data',
ylabel1='Kurtosis',
ylabel2='F1 Score (Neural Net)',
ax2none=False)
# ## ICA clustering
# In[4]:
ica_elec = FastICA(n_components=10,random_state=seed)
X_ica_elec = ica_elec.fit_transform(X_elec)
ica_def = FastICA(n_components=21,random_state=seed)
X_ica_def = ica_def.fit_transform(X_def)
# In[26]:
from helperfuncs_hw3 import *
clusters = [3,5,10,20,30]
elec_km_dict_ica = fit_clusters(X_ica_elec,y_elec,clusters,method='KM')
elec_gmm_dict_ica = fit_clusters(X_ica_elec,y_elec,clusters,method='GMM')
def_km_dict_ica = fit_clusters(X_ica_def,y_def,clusters,method='KM')
def_gmm_dict_ica = fit_clusters(X_ica_def,y_def,clusters,method='GMM')
# In[27]:
plot_clusters(elec_km_dict_ica,'KM','Electrical (ICA)')
plot_clusters(def_km_dict_ica,'KM','Default (ICA)')
plot_clusters(elec_gmm_dict_ica,'GMM','Electrical (ICA)')
plot_clusters(def_gmm_dict_ica,'GMM','Default (ICA)')
# # ICA NN
# In[5]:
X_train_elec_ica, X_test_elec_ica, y_train_elec_ica, y_test_elec_ica= train_test_split(X_ica_elec,y_elec,test_size=.2,random_state=seed,stratify=y_elec)
X_train_def_ica, X_test_def_ica, y_train_def_ica, y_test_def_ica= train_test_split(X_ica_def,y_def,test_size=.2,random_state=seed,stratify=y_def)
# In[6]:
clf_elec = MLPClassifier(hidden_layer_sizes= (100, 10))
clf_def= MLPClassifier(hidden_layer_sizes= (50,5))
# In[7]:
clf_elec.fit(X_train_elec_ica,y_train_elec_ica)
y_pred_elec_ica=clf_elec.predict(X_test_elec_ica)
print(classification_report(y_test_elec_ica,y_pred_elec_ica))
print(confusion_matrix(y_test_elec_ica,y_pred_elec_ica))
plot_roc_curve(y_test_elec_ica,y_pred_elec_ica,'ROC Curve for MLP Electrical (ICA)')
# In[15]:
clf_def.fit(X_train_def_ica,y_train_def_ica)
y_pred_def_ica=clf_def.predict(X_test_def_ica)
print(classification_report(y_test_def_ica,y_pred_def_ica))
print(confusion_matrix(y_test_def_ica,y_pred_def_ica))
plot_roc_curve(y_test_def_ica,y_pred_def_ica,'ROC Curve for MLP Default (ICA)')
# In[ ]:
# ## ICA NN with clusters
# In[ ]:
n_best_clusters = 10
km_ica_elec = KMeans(n_clusters=n_best_clusters,random_state=seed)
km_ica_def = KMeans(n_clusters=n_best_clusters,random_state=seed)
gmm_ica_elec = GaussianMixture(n_components=n_best_clusters,random_state=seed)
gmm_ica_def = GaussianMixture(n_components=n_best_clusters,random_state=seed)
labels_ica_km_elec = np.reshape(km_ica_elec.fit_predict(X_ica_elec),(-1,1))
labels_ica_km_def = np.reshape(km_ica_def.fit_predict(X_ica_def),(-1,1))
labels_ica_gmm_elec = np.reshape(gmm_ica_elec.fit_predict(X_ica_elec),(-1,1))
labels_ica_gmm_def = np.reshape(gmm_ica_def.fit_predict(X_ica_def),(-1,1))
X_ica_clust_elec = np.hstack((X_ica_elec,labels_ica_km_elec,labels_ica_gmm_elec))
X_ica_clust_def = np.hstack((X_ica_def,labels_ica_km_def,labels_ica_gmm_def))
# In[ ]:
X_train_ica_clust_elec, X_test_ica_clust_elec, y_train_ica_clust_elec, y_test_ica_clust_elec= train_test_split(X_ica_clust_elec,y_elec,test_size=.2,random_state=seed,stratify=y_elec)
X_train_ica_clust_def, X_test_ica_clust_def, y_train_ica_clust_def, y_test_ica_clust_def= train_test_split(X_ica_clust_def,y_def,test_size=.2,random_state=seed,stratify=y_def)
clf_elec = MLPClassifier(hidden_layer_sizes= (100, 10))
clf_def= MLPClassifier(hidden_layer_sizes= (50,5))
# In[ ]:
get_ipython().run_cell_magic('time', '', "\nclf_elec.fit(X_train_ica_clust_elec,y_train_ica_clust_elec)\ny_pred_ica_clust_elec=clf_elec.predict(X_test_ica_clust_elec)\n\nprint(classification_report(y_test_ica_clust_elec,y_pred_ica_clust_elec))\nprint(confusion_matrix(y_test_ica_clust_elec,y_pred_ica_clust_elec))\n\nplot_roc_curve(y_test_ica_clust_elec,y_pred_ica_clust_elec,'ROC Curve for MLP Electrical (ICA + Clustering)')")
# In[13]:
clf_def.fit(X_train_ica_clust_def,y_train_ica_clust_def)
y_pred_ica_clust_def=clf_def.predict(X_test_ica_clust_def)
print(classification_report(y_test_ica_clust_def,y_pred_ica_clust_def))
print(confusion_matrix(y_test_ica_clust_def,y_pred_ica_clust_def))
plot_roc_curve(y_test_ica_clust_def,y_pred_ica_clust_def,'ROC Curve for MLP Default (ica + clustering)')
# In[ ]:
| null |
ICA_cchen376.py
|
ICA_cchen376.py
|
py
| 7,747 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.FastICA",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.FastICA",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.FastICA",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.FastICA",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 261,
"usage_type": "call"
}
] |
550014964
|
import cv2
import numpy as np
class SlidingWindowLocalization:
"""
Uses sliding window localization to localize the human's centroid using a Haar Cascade model
"""
def __init__(self, images):
images = np.array(images)
images = images.reshape(images.shape[0], 240, 320)
height = 1
width = 0.5
self.px = []
self.py = []
self.classifier = cv2.CascadeClassifier("C:/Users/gcper/Code/STEM/haarcascade_fullbody.xml")
self.rects = []
self.boxes = []
for j, image in enumerate(images):
# print("Sliding on image ",j)
image = image.reshape(240, 320)
rects = []
for x, y, size, window in self.slide(image, height, width):
person = self.is_person(window)
if person:
rects.append([size, [x, y]])
points = sorted(rects, key=lambda i:i[1][0])[len(rects) // 3: ((len(rects) * 2) // 3) + 1]
l = len(points)
if l != 0:
if len(rects) > 0:
points = rects
if len(points) != 0:
self.px.append(sum([c[1][0] for c in points]) // len(points))
self.py.append(sum([c[1][1] for c in points]) // len(points))
left_x = sum([points[i][1][0] - (points[i][0][0]//2) for i in range(l)]) // l
left_y = sum([points[i][1][1] - (points[i][0][1]//2) for i in range(l)]) // l
size_w = sum([points[i][0][0] for i in range(l)]) // l
size_h = sum([points[i][0][1] for i in range(l)]) // l
self.boxes.append([[left_x, left_y], [left_x + size_w, left_y + size_h]])
else:
# print("0 humans detected")
self.px.append(0)
self.py.append(0)
self.boxes.append([[0, 0], [0, 0]])
def sliding_window(self, image, step_size, window_size):
# slide a window across the image
for y in range(0, image.shape[0]-window_size[1], step_size):
for x in range(0, image.shape[1]-window_size[0], step_size):
# yield the current window
tmp_x, tmp_y = x + (window_size[0] // 2), y + (window_size[1] // 2)
yield (tmp_x, tmp_y, window_size, image[y:y + window_size[1], x:x + window_size[0]])
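    # Each yielded item is (center_x, center_y, window_size, window_pixels),
    # where (center_x, center_y) is the window centre in image coordinates.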
def slide(self, image, height, width):
# image size 240 by 320
for size in range(200, 240, 20)[::-1]:
for window in self.sliding_window(image, 5, [int(width * size), int(height * size)]):
            # range(200, 240, 20)[::-1] gives window sizes 220 and 200, largest first
yield window
def is_person(self, image):
return bool(len(self.classifier.detectMultiScale(image.astype(np.uint8))))
if __name__ == "__main__":
from versions.swl1 import main
main.main()
| null |
versions/swl2/slidingwindow.py
|
slidingwindow.py
|
py
| 2,941 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "versions.swl1.main.main",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "versions.swl1.main",
"line_number": 72,
"usage_type": "name"
}
] |
202600481
|
import sys
import csv
from csv import reader
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_auc_score, precision_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.model_selection import GridSearchCV, KFold, cross_validate
from sklearn.preprocessing import StandardScaler
from utils import ClassifierGrids
from evaluation import *
class eviction_classifier:
def __init__(self, data_dir):
"""
constructor
input:
data_dir (str): path where time sliced data splits are located.
"""
self.result_accumulator = {} # accumulate results for later graphing
self.data_dir = data_dir
def train_test_split(self, training_year, testing_year):
"""
Takes care of extracting features and labels from testing and training year datasets
Also takes care of scaling
training_year [(str)]: array of strings. represents the years that are being used for training data
testing_year [(str)]: array of strings. represents the years that are being used for testing data
"""
scaler = StandardScaler()
training_features = []
training_labels = []
for year in training_year:
fp = "%s/%s.csv" % (self.data_dir,year)
readCSV = csv.reader(open(fp), delimiter=',')
for row in readCSV:
features = [float(x) for x in row[3:]]
features[-1] = int(features[-1])
training_features.append(tuple(features))
training_labels.append(int(float(row[0])))
testing_features = []
testing_labels = []
for year in testing_year:
fp = "%s/%s.csv" % (self.data_dir,year)
readCSV = csv.reader(open(fp), delimiter=',')
for row in readCSV:
features = [float(x) for x in row[3:]]
features[-1] = int(features[-1])
testing_features.append(tuple(features))
testing_labels.append(int(float(row[0])))
        training_features = scaler.fit_transform(training_features)
        # Fit the scaler on training data only and reuse it for the test split,
        # so test-set statistics do not leak into the scaling.
        testing_features = scaler.transform(testing_features)
return {"training_features": tuple(training_features),
"training_labels": tuple(training_labels),
"testing_features": tuple(testing_features),
"testing_labels": tuple(testing_labels)}
def run_classifiers(self, data, classifier, parameters_grid, test_year, draw_prec_rec_curve=False, draw_feat_imp=False):
"""
Runs a single classifier through hypterparameter grid search.
Inputs:
Data (dict): training and testing features and labels used in the model
Classifier (sklearn.classifier): sklearn classifier object to run grid search on
Parameters_grid (dict): Dictionary of parameters to grid search through, written in config.py
test_year (str): year that the test data is from
draw (str)(optional): Will draw feature importance for classifier if True
"""
clf = GridSearchCV(classifier, parameters_grid)
pred = clf.fit(data['training_features'], data['training_labels'])
pred_labels = pred.predict(data['testing_features'])
if draw_feat_imp:
draw_feature_importance(clf)
if draw_prec_rec_curve:
y_scores=clf.predict_proba(data['testing_features'])
draw_precision_recall_curve(data["testing_labels"], y_scores)
roc_auc = roc_auc_score(data["testing_labels"], pred_labels, average='macro')
prec = precision_score(data["testing_labels"], pred_labels, average='macro')
self.persist_result(classifier.__class__.__name__, clf.best_params_, roc_auc,
prec, data["training_labels"], data["testing_labels"], test_year, write_out=True)
def run_all(self, years_range=(2010,2016)):
"""
Runs all classifiers. Also plots performance over time for all classifiers.
years_range (int,int)(opt): year range to do analysis on.
"""
for clf in ClassifierGrids.clf_list:
classifier, param_grid = ClassifierGrids.clf_list[clf]
first_year = True
for year in range(years_range[0]+1, years_range[1]+1):
data = None
if first_year:
data = self.train_test_split([year-1], [year])
first_year = False
else:
data = self.train_test_split([year-1, year-2], [year])
self.run_classifiers(data, classifier, param_grid, year)
draw_temporal_perf_graph(self.result_accumulator)
print(self.result_accumulator)
return
def persist_result(self, classifier, best_params, roc_auc_score, precision, train_labels, test_labels, test_year, write_out=False):
"""
Prints results, writes them to a log file, and also adds result to the object results dictionary.
classifier (sklearn.classifier): classifier type
best_params (dict): best parameters
roc_auc_score (float): roc_auc score for this setting
precision (float): precision score for this setting
train_labels ([int]): training labels
test_labels ([int]): testing labels
test_year (int): the year that the testing split was on
write_out (bool)(opt): writes to log if True
"""
if classifier not in self.result_accumulator:
self.result_accumulator[classifier] = {}
self.result_accumulator[classifier][test_year] = (roc_auc_score, precision)
print("======= %s %s ========" % (classifier, test_year))
print ("BEST PARAMS: %s" % (best_params) )
print("Training Set Size:%s" % (len(train_labels)))
print("Testing Set Size: %s" % (len(test_labels)))
print("ROC AUC Score: %s" % (roc_auc_score))
print("Precision: %s" % (precision))
if write_out:
f = open("results.txt", "a+")
f.write("======= %s : %s ========\n" % (classifier, test_year))
f.write("BEST PARAMS: %s\n" % (best_params) )
f.write("Training Set Size:%s\n" % (len(train_labels)))
f.write("Testing Set Size: %s\n" % (len(test_labels)))
f.write("ROC AUC Score: %s\n" % (roc_auc_score))
f.write("Precision: %s\n" % (precision))
if __name__ == "__main__":
e = eviction_classifier("../../datasets/time_sliced")
    e.run_all()
| null |
src/model/core.py
|
core.py
|
py
| 6,765 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.GridSearchCV",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_score",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "utils.ClassifierGrids.clf_list",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "utils.ClassifierGrids",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "utils.ClassifierGrids.clf_list",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "utils.ClassifierGrids",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 145,
"usage_type": "name"
}
] |
472183572
|
# run with
# python test_dask_proc.py
import yt
import os
from dask.distributed import Client
from profiler import ProfileManager, test_info
from timeit import default_timer as timer
if __name__ == "__main__":
c = Client(n_workers = 4, threads_per_worker=1)
ds = yt.load_sample("snapshot_033")
ttype = "dask_multiproc"
sdir = os.path.join(".", "results", ttype)
if os.path.isdir(sdir) is False:
os.mkdir(sdir)
for test_iter in range(test_info['iterations']):
p = ProfileManager(ttype)
sp = ds.sphere(ds.domain_center, 0.5)
p.enable()
vals = sp[("PartType0", "Density")].compute()
p.disable()
saveprof = os.path.join(sdir, f"it_{test_iter}.prof")
p.dump_stats(saveprof)
# raw time measure
p = ProfileManager(ttype, nproc=4)
times = []
for test_iter in range(test_info['iterations']):
sp = ds.sphere(ds.domain_center, 0.5)
t0 = timer()
vals = sp[("PartType0", "Density")]
t1 = timer()
times.append(t1 - t0)
p.save_rawtimes("results", times, "sphere")
| null |
code/test_dask_proc.py
|
test_dask_proc.py
|
py
| 1,121 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "dask.distributed.Client",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "yt.load_sample",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "profiler.test_info",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "profiler.ProfileManager",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "profiler.ProfileManager",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "profiler.test_info",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "timeit.default_timer",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 41,
"usage_type": "call"
}
] |
518548683
|
#!/usr/bin/python
#_*_ encoding:utf-8_*_
"""
日志管理模块
"""
import logging
"""创建日志"""
logger = logging.getLogger(__name__)
logger.setLevel(level = logging.INFO)
handler = logging.FileHandler("Scraping_log2.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
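# Usage (illustrative), assuming this module is importable as `log`:
#   from log import logger
#   logger.info("scraper started")
# Entries are appended to Scraping_log2.txt in the working directory.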
| null |
Scraping/practice/Douyu/log.py
|
log.py
|
py
| 404 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.FileHandler",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 13,
"usage_type": "call"
}
] |
628111448
|
import numpy as np
import cv2
import time
confThreshold = 0.5
nmsThreshold = 0.2
inpWidth = 416
inpHeight = 416
def get_coods(img, outs):
frameHeight, frameWidth = img.shape[0], img.shape[1]
classIds, confidences, boxes = [], [], []
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > confThreshold:
center_x = int(detection[0] * frameWidth)
center_y = int(detection[1] * frameHeight)
width = int(detection[2] * frameWidth)
height = int(detection[3] * frameHeight)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classIds.append(classId)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
# Perform non maximum suppression to eliminate redundant overlapping boxes with
# lower confidences.
indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
    return [boxes[i[0]] for i in indices]
start = time.time()
cfg_path = "../yolo/yolov3-tiny.cfg"
weights_path = "../yolo/yolov3-tiny.weights"
net = cv2.dnn.readNetFromDarknet(cfg_path , weights_path)
classes = []
with open("../yolo/coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
frame = cv2.imread('../media/view3.jpeg')
background = cv2.imread('../media/s.jpg')
img = cv2.resize(frame, (504, 378))
bgdModel = np.zeros((1,65), np.float64)
fgdModel = np.zeros((1,65), np.float64)
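# bgdModel/fgdModel are scratch arrays GrabCut uses internally for its
# background/foreground models; OpenCV requires shape (1, 65), float64.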
blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
rects = get_coods(img, outs)
for rect in rects:
mask = np.zeros(img.shape[:2],np.uint8)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img = img*mask2[:,:,np.newaxis]
img = cv2.resize(img, (4032, 3024))
non_black_pixels_mask = np.all(img != [0, 0, 0], axis=-1)
background[non_black_pixels_mask] = frame[non_black_pixels_mask]
cv2.imwrite("output3c.jpg", background)
end = time.time()
print(end-start)
| null |
programs/background_change.py
|
background_change.py
|
py
| 2,506 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.argmax",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.NMSBoxes",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.readNetFromDarknet",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "cv2.dnn.blobFromImage",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "cv2.grabCut",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cv2.GC_INIT_WITH_RECT",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 77,
"usage_type": "call"
}
] |
215217540
|
#!/usr/bin/env python3
'''
This is a set of functions used by pymm scripts
This file is organized into 4 main sections:
* CONFIG CHECK STUFF
* PYMM ADMIN / LOGGING STUFF
* FILE CHECK STUFF
* SYSTEM / ENVIRONMENT STUFF
'''
import configparser
import datetime  # full module needed by convert_millis()
from datetime import date
import glob
import hashlib
import json
import os
import platform
import re
import subprocess
import sys
import shutil
import time
# nonstandard libraries:
import Levenshtein
# local modules:
try:
import dbReporters
import loggers
import makeMetadata
import MySQLqueries
import directoryScanner
except:
from . import dbReporters
from . import loggers
from . import makeMetadata
from . import MySQLqueries
from . import directoryScanner
################################################################
#
# CONFIG CHECK STUFF
#
def read_config():
pymmDirectory = os.path.dirname(os.path.abspath(__file__))
configPath = os.path.join(pymmDirectory,'pymmconfig','config.ini')
if not os.path.isfile(configPath):
print('''
CONFIGURATION PROBLEM:\n
YOU HAVE NOT YET SET CONFIG.INI OR IT IS MISSING.\n
RUN pymmconfig.py TO CREATE CONFIG.INI AND CHOOSE YOUR DESIRED SETTINGS.\n
NOW EXITING.
''')
sys.exit()
	config = configparser.ConfigParser()  # SafeConfigParser is a deprecated alias
config.read(configPath)
return config
def check_missing_ingest_paths(pymmConfig):
requiredPaths = {
'outdir_ingestsip':'the ingestSip.py output path',
'aip_staging':'the AIP storage path',
'resourcespace_deliver':'the resourcespace output path'
}
missingPaths = 0
for path in requiredPaths.items():
if not os.path.isdir(pymmConfig['paths'][path[0]]):
missingPaths += 1
print('''
CONFIGURATION PROBLEM:
You have not yet set a valid directory for '{}.' Please run pymmConfig.py,
edit the config file directly,
or use '--{}' to set {}.
HINT: Check that the filepath is entered correctly.
'''.format(path[0],path[0],path[1])
)
if missingPaths > 0:
print("\nYou are missing some required file paths and we have to quit. Sorry.")
sys.exit()
#
# END CONFIG CHECK STUFF
#
################################################################
################################################################
#
# PYMM ADMIN / LOGGING STUFF
#
# have to import dbAccess after init config to avoid circular error
try:
import dbAccess
except:
from . import dbAccess
def cleanup_package(CurrentIngest,pathForDeletion,reason,outcome=None):
# print(pathForDeletion)
inputType = CurrentIngest.InputObject.inputType
dontDelete = False
if reason == "ABORTING":
status = 'ABORTING'
event = 'ingestion end'
if not outcome:
outcome = (
"Something went critically wrong... "
"The ingest process was aborted."
"\n{}\nand its contents have been deleted.".format(
pathForDeletion
)
)
# put the things back
try:
if inputType == 'file':
_object = [
thing.path for thing in
os.scandir(CurrentIngest.packageObjectDir)
if thing.is_file()
][0]
if os.path.isfile(CurrentIngest.InputObject.inputPath):
pass
else:
shutil.move(
_object,
CurrentIngest.InputObject.inputParent
)
else:
if not os.path.isdir(CurrentIngest.InputObject.inputPath):
os.mkdir(CurrentIngest.InputObject.inputPath)
for _object in os.scandir(CurrentIngest.packageObjectDir):
if _object.name not in ('resourcespace','prores'):
shutil.move(
_object.path,
CurrentIngest.InputObject.inputPath
)
except:
dontDelete = True
outcome = ("COULD NOT REPLACE ORIGINAL COPIES!! \
NOT DELETING {}!".format(pathForDeletion))
print(outcome)
elif reason == 'done':
status = 'OK'
event = 'deletion'
outcome = (
"Deleting original copies "
"of object at {}".format(pathForDeletion)
)
if os.path.isdir(pathForDeletion) and dontDelete == False:
try:
shutil.rmtree(pathForDeletion)
except:
outcome = (
"Could not delete the package at "
+pathForDeletion+
". Try deleting it manually?"
)
print(outcome)
CurrentIngest.caller = 'pymmFunctions.cleanup_package()'
loggers.end_log(
CurrentIngest,
event,
outcome,
status
)
def validate_SIP_structure(SIPpath):
'''
Check that all the top-level stuff expected in a package exists.
Don't go too deep...
Current expected structure is:
UUID/
UUID/
metadata/
objects_manifest_UUID_iso8601.txt
objectCanonicalName_pbcore.xml
logs/
ingestLog.txt
ffmpeglog
rsyncLog
objects/
masterobject1_framemd5.md5
masterobject2_framemd5.md5
masterobject1_mediainfo.xml
masterobject2_mediainfo.xml
resourcespace/
resourcespace_mediainfo.xml
objects/
masterobject1
masterobject2
resourcespace/
resourcespace1
resourcespace2
# (changed this 7/16/18) hashdeep_manifest_UUID_iso8601.txt
'''
structureValidated = True
status = "OK"
_UUID = os.path.basename(SIPpath)
# define the directories to check
ingestDir = os.path.join(SIPpath,_UUID)
metadataDir = os.path.join(ingestDir,'metadata')
logDir = os.path.join(metadataDir,'logs')
objectMetadataDir = os.path.join(metadataDir,'objects')
objectDir = os.path.join(ingestDir,'objects')
dirs = [ingestDir,metadataDir,logDir,objectMetadataDir,objectDir]
reasonsFailed = []
# check that they exist
# I should log the non-existence of any of these
# maybe rename the SIP to FAILED-UUID?
for thing in dirs:
if not os.path.isdir(thing):
structureValidated = False
failure = "missing {}".format(os.path.basename(thing))
reasonsFailed.append(failure)
print(failure)
# use glob to search for the existence of
# 1) hashdeep manifest
# 2) pbcore xml file
objectManifestPattern = os.path.join(
metadataDir,
'objects_manifest_*'
)
manifest = glob.glob(objectManifestPattern)
if manifest == []:
failure = "missing a hashdeep manifest for the SIP object directory"
reasonsFailed.append(failure)
print(failure)
structureValidated = False
pbcorePattern = os.path.join(metadataDir,'*_pbcore.xml')
pbcore = glob.glob(pbcorePattern)
if pbcore == []:
failure = "missing a pbcore xml description for the object"
reasonsFailed.append(failure)
print(failure)
structureValidated = False
if structureValidated:
outcome = "SIP validated against expected structure"
else:
outcome = "SIP failed to validate for these reasons:\n~ {}\n".format(
"\n~ ".join(reasonsFailed)
)
return structureValidated,outcome
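# Illustrative call (path is hypothetical):
#   ok, outcome = validate_SIP_structure('/mnt/aip_staging/<UUID>')
#   if not ok: print(outcome)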
def database_connection(user):
connection = dbAccess.DB(user)
try:
connection.connect()
return connection
except:
print("DB connection problem...")
return False
def do_query(connection,sql,*args):
'''
must be passed an open mysql.connector.connection.MySQLConnection object
'''
cursor = connection.query(sql,*args)
return cursor
def parse_pbcore_xml(pbcoreFile):
pbcoreString = ''
with open(pbcoreFile,'r') as f:
for line in f:
pbcoreString += line
return pbcoreString
#
# END PYMM ADMIN / LOGGING STUFF
#
################################################################
################################################################
#
# FILE CHECK STUFF
#
def is_video(inputPath):
# Look for a video stream with codec_type == 'video'
ffprobe = [
'ffprobe',
'-v','error',
'-i',inputPath,
'-print_format','json',
'-show_streams',
'-select_streams','v'
]
try:
probe = subprocess.run(ffprobe,stdout=subprocess.PIPE)
out = probe.stdout.decode('utf-8')
output = json.loads(out)
try:
codec_type = output['streams'][0]['codec_type']
if codec_type == 'video':
if not any([x in output['streams'][0]['codec_name'] for x in ('jpeg','jpg','png')]):
return True
else:
return False
except:
return False
except:
return False
def is_audio(inputPath):
print("THIS ISN'T A VIDEO FILE\n"
'maybe this is an audio file?')
# DO THE SAME AS ABOVE BUT codec_type == 'audio'
ffprobe = [
'ffprobe',
'-v','error',
'-i',inputPath,
'-print_format','json',
'-show_streams',
'-select_streams','a'
]
try:
probe = subprocess.run(ffprobe,stdout=subprocess.PIPE)
out = probe.stdout.decode('utf-8')
output = json.loads(out)
try:
codec_type = output['streams'][0]['codec_type']
if codec_type == 'audio':
print("This appears to be an audio file!")
return True
except:
print("THIS DOESN'T SMELL LIKE AN AUDIO FILE EITHER")
# print(output)
return False
except:
print("INVALID FILE INPUT, NOT AUDIO EITHER")
return False
def is_av(inputPath):
'''
run tests for video, then audio, then DPX seq, then give up.
@FIXME - this should return a more verbose/useful
explanation of failed tests.
	Currently the expected return value is Boolean when is_av() is called.
'''
_is_video = is_video(inputPath)
_is_audio = False
_is_dpx = False
_is_dpx_av = False
if _is_video == True:
return 'VIDEO'
else:
_is_audio = is_audio(inputPath)
if _is_audio:
return 'AUDIO'
else:
if os.path.isdir(inputPath):
# if it's a folder, see if it's a DPX sequence
try:
# test for a valid folder structure
_is_dpx,details = directoryScanner.main(inputPath)
print(_is_dpx)
print(details)
except:
print('error scanning input!')
return False
if _is_dpx:
if _is_dpx == 'dpx':
print('THIS IS AN IMAGE SEQUENCE!')
return 'DPX'
else:
# if it passes the folder structure, run
# mediainfo to check for dpx contents
status, failedDirs = test_sequence_reel_dir(inputPath)
if status == True:
print('THIS IS AN IMAGE SEQUENCE!')
return 'DPX'
else:
print(
'ERROR: check out this list of '
'problem directories: {}'.format(failedDirs)
)
return False
else:
return None
else:
return None
def test_sequence_reel_dir(reelPath):
'''
Take a directory that should contain only a wav file
and a corresponding directory with an image sequence in it.
If there's a problem with one or more of the directories return
it/them in a list.
'''
failedDirs = []
failures = 0
for item in os.scandir(reelPath):
if item.name == 'documentation':
			continue  # skip the documentation folder but keep scanning the rest
if item.is_dir():
print(item.path)
if item.name.lower() == 'dpx':
_is_dpx = is_dpx_sequence(item.path)
if not _is_dpx:
failedDirs.append(item.path)
failures += 1
else:
failedDirs.append(item.path)
failures += 1
if failures > 0:
return False, failedDirs
else:
return True, failedDirs
def is_dpx_sequence(inputPath):
'''
run mediainfo on the 'dpx' folder
'''
_is_dpx_av = False
try:
		_format = get_mediainfo_value(inputPath,'General','Format')
		if any(x in _format.lower() for x in ('dpx','directory')):
_is_dpx_av = True
else:
pass
except:
_is_dpx_av = False
return _is_dpx_av
def check_policy(ingestType,inputPath):
print('do policy check stuff')
policyStatus = "result of check against mediaconch policy"
return policyStatus
def dir_or_file(inputPath):
if os.path.isdir(inputPath):
return 'dir'
elif os.path.isfile(inputPath):
return 'file'
else:
return False
def get_base(inputPath,base='basename'):
bases = {'basename':'','baseMinusExtension':'','ext_original':''}
if not base in bases.keys():
return "_:(_"
else:
try:
basename = os.path.basename(inputPath)
bases['basename'] = basename
baseAndExt = os.path.splitext(basename)
baseMinusExtension = baseAndExt[0]
bases['baseMinusExtension'] = baseMinusExtension
ext_original = baseAndExt[1]
bases['ext_original'] = ext_original
return bases[base]
except:
print("error getting basename")
return "_:(_"
def abspath_list(directory):
paths = []
for filename in os.listdir(directory):
path = os.path.abspath(os.path.join(directory, filename))
# path = path.replace(' ','\\ ')
paths.append(path)
return paths
def check_dir_filename_distances(directory):
'''
Check a directory to be ingested for wildly divergent filenames.
We will currently only want to allow single-level directories of
files that represent parts of a whole and thus have fairly
similar filenames.
'''
_list = abspath_list(directory)
names = []
for name in _list:
if os.path.isfile(name):
if not os.path.basename(name).startswith('.'):
names.append(name)
median = Levenshtein.median(names)
# print(median)
outliers = 0 # start a counter for the number of files that diverge from the median name
outlierList = [] # and list them
for name in names:
distance = Levenshtein.distance(median,name)
# print(distance)
if distance > 15:
outliers += 1
outlierList.append(name)
return outliers,outlierList
def check_for_outliers(inputPath):
'''
Use distance check function to approve/deny
viability of directory ingest.
'''
goodNames = True
outliers, outlierList = check_dir_filename_distances(inputPath)
if outliers > 0:
outlierListString = '\n'.join(outlierList)
warning = (
"Hey, there are {} files that seem like they might not belong"\
" in the input directory. If you think this is incorrect, check"\
" the filenames below. Maybe make them more consistent.\n"\
"Here's a list of possible outliers:\n{}".format(
str(outliers),
outlierListString
)
)
		print(warning)
		return False,outlierList
else:
return True,None
def list_files(_input):
'''
Take in an absolute path of a directory and return a list of the paths
for everything in it.
'''
if os.path.isdir(_input):
source_list = []
for _file in os.listdir(_input):
if os.path.isdir(_file) and not _file.lower() == 'documentation':
print("you have unexpected subdirectories!")
else:
source_list.append(os.path.join(_input,_file))
source_list.sort()
return source_list
else:
print("you're trying to list files but the input is a file. go away.")
# sys.exit()
pass
def get_temp_id(_string):
'''
Generate a hash of a string (i.e., of an input path) that can be used
to produce a *locally* unique temporary ID during the ingestSIP process.
For convenience (?) I'm cutting it down to 10 digits.
example: ingestSIP -i 'foo.mov' --> tempID = a8bcd6d073
where:
sha256 = a8bcd6d073c91f6132f6d64674ecaf658a33c4aedde4046b0b7bf64e9c723073
'''
pathHash = hashlib.sha256(_string.encode()).hexdigest()
tempID = pathHash[:10]
return tempID
def rename_dir(_dir,newName):
if os.path.isdir(_dir):
path = os.path.dirname(_dir)
newPath = os.path.join(path,newName)
try:
newDir = os.rename(_dir,newPath)
return newPath
except OSError as e:
print("OOPS: {}".format(e))
else:
print("{} is not a directory so go away.".format(_dir))
def convert_millis(milli):
'''
Lifted directly from IFIscripts. Written by Kieran O'Leary.
Requires an integer that is the number of milliseconds.
For example mediainfo returns '00:51:58.824' as a string '3118.824'
so you gotta remove the period, convert to integer, and parse here.
Accepts milliseconds and returns this value as HH:MM:SS.NNN
'''
# get the number of seconds and milliseconds
a = datetime.timedelta(milliseconds=milli)
# convert to a handy string that looks like '0:51:58.824000'
# so we can check for milliseconds present
b = str(a)
# no millseconds are present if there is no remainder. We need milliseconds!
if len(b) == 7:
b += '.000000'
# convert seconds-based tuple to H:M:S:ms tuple
timestamp = datetime.datetime.strptime(b, "%H:%M:%S.%f").time()
# turn that into a string like '0:51:58.824000'
c = str(timestamp)
if len(c) == 8:
c += '.000000'
# trim off the unneeded zeros
return str(c)[:-3]
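# e.g. convert_millis(3118824) -> '00:51:58.824'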
def get_audio_sample_rate(inputPath):
# get the sample rate for an audio file
_type = 'Audio'
fieldName = 'SamplingRate'
rate = get_mediainfo_value(
inputPath,
_type,
fieldName
)
return rate
def get_encoded_date(inputPath):
encodedDate = get_mediainfo_value(
inputPath,
'General',
'Encoded_Date'
)
return encodedDate
def get_mediainfo_value(inputPath,_type,fieldName):
'''
inspired by IFIscripts and StackOverflow answer by Jerome M.
Note: you don't need quotation marks here after --inform parameter
which you do need when calling mediainfo from command line.
`_type` is either General, Audio, or Video
`fieldName` is the raw field name
(look at `mediainfo --Language=raw --Full /file/path`
to see all the fields)
'''
mediainfo = [
'mediainfo',
'--inform={};%{}%'.format(_type,fieldName),
inputPath
]
out = subprocess.run(mediainfo,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
value = out.stdout.decode().rstrip()
return value
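# Illustrative call (file path is hypothetical):
#   get_mediainfo_value('/tmp/clip.mov','Video','Width') -> '1920'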
def get_framerate(inputPath):
'''
get the framerate from a video file
'''
framerate = get_mediainfo_value(
inputPath,
'Video',
'FrameRate'
)
return framerate
def parse_sequence_parent(inputPath):
'''
input path should only ever be:
title_acc#_barcode_reel#/
dpx/
title_acc#_barcode_reel#_sequence#.dpx
[optionaltitle_acc#_barcode_reel#.wav]
this function returns a few variables:
* audioPath = path to an audio file (should be .wav in all our cases)
* filePattern = pattern for ffmpeg to parse
* startNumber = first sequence number
* framerate = embedded by scanner in DPX files
'''
	directoryScanner.main(inputPath)
	audioPath = None  # stays None for silent DPX scans; set when a .wav is found
	for entry in os.scandir(inputPath):
		if entry.is_file():
			if entry.name.endswith('.wav'):
				audioPath = entry.path
		elif entry.is_dir():
			# should be a single DPX dir with only dpx files in it
			filePattern,startNumber,file0 = parse_sequence_folder(entry.path)
			try:
				framerate = get_framerate(file0)
			except:
				framerate = None
return audioPath,filePattern,startNumber,framerate
def parse_sequence_folder(dpxPath):
'''
Grab some information needed for ffmpeg transcoding of an image sequence:
* the /path/plus/file_%6d.dpx type pattern needed for ffmpeg
* the starting number of the sequence
* the /path/to/first/file in the sequence
'''
files = []
scan = os.scandir(dpxPath)
for entry in scan:
files.append(entry.path)
files.sort()
file0 = files[0]
match = re.search(r'(.*)(\d{6}|\d{7})(\..+)',file0)
fileBase = match.group(1)
startNumber = match.group(2)
numberOfDigits = len(startNumber)
extension = match.group(3)
filePattern = "{}%0{}d{}".format(fileBase,numberOfDigits,extension)
# print(filePattern,startNumber,file0)
return filePattern,startNumber,file0
def get_stream_count(inputPath,_type="video"):
'''
Count the data streams present in an av file.
Specify _type as "audio" or "video" (default)
For example, a file with audio track(s) should return one line per stream:
'streams.stream.0.index=1'
Tally these lines and take that as the count of audio streams.
'''
probeCommand = [
'ffprobe', '-hide_banner',
inputPath,
'-select_streams', _type[:1], # get the first letter of _type (a or v)
'-show_entries', 'stream=index',
'-of', 'flat'
]
count = None
try:
count = 0
probe = subprocess.run(
probeCommand,
stdout=subprocess.PIPE, # this should return a list of streams
stderr=subprocess.PIPE
)
count += len(probe.stdout.splitlines())
except Exception as e:
print(e)
pass
return count
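# e.g. get_stream_count('/tmp/clip.mov','audio') returns 2 for a file
# with two audio streams (path and value are illustrative)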
def check_dual_mono(inputPath):
'''
Check if a video file has the first two audio streams mono
inspired by `mmfunctions` _has_first_two_tracks_mono()
'''
probes = []
dualMono = None
for index in range(2):
probe = [
'ffprobe',
inputPath,
"-show_streams",
"-select_streams","a:{}".format(index)
]
out = subprocess.run(
probe,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
probes.append(out)
probe1 = probes[0]
probe2 = probes[1]
stream1Channels = [
x for x in probe1.stdout.decode().splitlines() \
if x.startswith('channels=')
]
stream2Channels = [
x for x in probe2.stdout.decode().splitlines() \
if x.startswith('channels=')
]
if stream1Channels == stream2Channels == ['channels=1']:
dualMono = True
else:
dualMono = False
return dualMono
def check_empty_mono_track(inputPath):
'''
Check if one mono audio track is basically empty.
Intended usage is with a dual mono file so we can remove
an empty track and use the non-empty one as track 1.
NB: setting "empty" as below -50dB RMS (root mean square) level,
this could be tweaked!
'''
# ffmpeg -i /Users/michael/Desktop/test_files/illuminated_extract.mov -map 0:a:1 -af astats -f null -
empty = {0:False,1:False}
for stream in range(2):
command = [
'ffmpeg',
'-i',inputPath,
'-map','0:a:{}'.format(stream),
'-af','astats',
'-f','null','-'
]
# print(command)
output = subprocess.run(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stats = [line for line in output.stderr.decode().splitlines()]
chopped = [re.sub(r'\[Parsed_astats.+\]\ ','',line) for line in stats]
leveldB = [
int(float(line.replace('RMS level dB: ',''))) for line in chopped \
if line.startswith('RMS level dB: ')
]
print(leveldB)
try:
# leveldB[1] should be astats' "Overall" RMS reading (leveldB[0] is channel 1)
if leveldB[1] < -50:
empty[stream] = True
except IndexError:
pass
print(empty)
returnValue = None
count = 0
if any(empty.values()):
for k,v in empty.items():
if not v:
returnValue = k # return the stream id to keep
else:
count += 1
if count > 1:
returnValue = 'both'
else:
returnValue = None
return returnValue
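# Reading check_empty_mono_track's return value (sketch; the filename is
# hypothetical):
#   None   -> no mono track measured as empty (or fewer than two RMS readings)
#   0 or 1 -> index of the audio stream to KEEP; the other is below -50dB RMS
#   'both' -> both tracks measured as empty
#
#   keep = check_empty_mono_track('dual_mono.mov')
#   if keep in (0, 1):
#       pass  # e.g. remux mapping only audio stream `keep`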
#
# END FILE CHECK STUFF
#
################################################################
################################################################
#
# SYSTEM / ENVIRONMENT STUFF
#
def get_system():
if sys.platform.startswith("darwin"):
return "mac"
elif sys.platform.startswith("win"):
return "windows"
elif sys.platform.startswith("linux"):
return "linux"
else:
return False
def system_info():
info = platform.uname()
systemDict = dict(info._asdict())
systemString = ""
systemDict['ffmpeg version'] = get_ffmpeg_version()
systemDict['mediainfo version'] = get_mediainfo_version()
# format a string to be returned with each bit of info on a new line
for k,v in systemDict.items():
systemString += "{} : {}\n".format(k,v)
return systemString
def get_node_name():
nodeName = platform.uname().node
return nodeName
def timestamp(style=None):
knownStyles = ['iso8601','YMD','now','8601-filename']
if style in knownStyles:
if style == 'iso8601':
timestamp = time.strftime("%Y-%m-%dT%H:%M:%S")
elif style == 'YMD':
timestamp = str(date.today())
elif style == '8601-filename':
timestamp = time.strftime("%Y-%m-%dT%H-%M-%S")
elif style == 'now':
timestamp = time.strftime("%Y%m%d_%H%M%S")
return timestamp
else:
return str(date.today())
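# Illustrative outputs for each known style (actual values depend on the clock):
#   timestamp('iso8601')       -> '2019-05-01T14:30:05'
#   timestamp('8601-filename') -> '2019-05-01T14-30-05'  (colon-free for filenames)
#   timestamp('now')           -> '20190501_143005'
#   timestamp('YMD')           -> '2019-05-01'
#   timestamp()                -> falls back to str(date.today())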
def get_caller():
caller = sys.argv[0]
caller = os.path.splitext(caller)[0]
return caller
def get_ffmpeg_version():
call = subprocess.check_output(['ffmpeg','-version'])
version = call.decode('utf-8').split()[2]
return version
def get_mediainfo_version():
call = subprocess.check_output(['mediainfo','--version'])
version = ' '.join(call.decode('utf-8').split())
return version
def set_ffreport(dest,caller):
ffmpegVersion = get_ffmpeg_version()
os.environ["FFREPORT"] = "file="+os.path.join(dest,"ffmpeg-"+ffmpegVersion+"_"+timestamp('now')+"_"+caller+".txt")
return "set FFREPORT to "+dest
def unset_ffreport():
del os.environ["FFREPORT"]
def get_unix_ip():
# totally stolen from mmfunctions
ifconfig = subprocess.Popen(['ifconfig'],stdout=subprocess.PIPE)
grep = subprocess.Popen(['grep','inet '],stdin=ifconfig.stdout,stdout=subprocess.PIPE)
tail = subprocess.Popen(['tail','-n1'],stdin=grep.stdout,stdout=subprocess.PIPE)
thestuff = subprocess.Popen(['cut','-d',' ','-f2'],stdin=tail.stdout,stdout=subprocess.PIPE)
ip = thestuff.communicate()[0].decode().rstrip()
return ip
def boolean_answer(string):
thumbsUp = ['YES','Yes','yes','y','Y',True,1,'True','1']
thumbsDown = ['NO','No','no','n','N',False,0,'False','0']
if string in thumbsUp:
return True
elif string in thumbsDown:
return False
else:
print("Not a Boolean answer... try again.")
return "Not Boolean"
def sanitize_dragged_linux_path(var):
if get_system() == 'linux':
if len(var) >= 3 and var[0] == var[-1] == "'":
var = var[1:-1]
return var
else:
print("???")
return var
else:
return var
def gcp_test():
'''
test that `gcp` is installed and get the path for the binary.
test for a dbus error on linux and add `dbus-launch` if needed.
'''
whichGcp = subprocess.run(['which','gcp'],stdout=subprocess.PIPE)
gcpPath = whichGcp.stdout.decode().rstrip()
gcpCommand = [gcpPath]
if gcpPath == '':
print('gcp is not installed.')
else:
tryGcp = subprocess.run(gcpCommand,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
if "DBusException" in tryGcp.stderr.decode():
gcpCommand.insert(0,"dbus-launch")
return gcpCommand
def recursive_chmod(path,mode=0o777):
chmodded = False
try:
os.chmod(path,mode)
chmodded = True
for root,dirs,files in os.walk(path):
for directory in dirs:
try:
os.chmod(os.path.join(root,directory),mode)
chmodded = True
except:
chmodded = False
for f in files:
try:
os.chmod(os.path.join(root,f),mode)
chmodded = True
except:
chmodded = False
except:
chmodded = False
return chmodded
def remove_hidden_system_files(inputPath):
removed = []
dont_remove = ['.git','.tmp.drivedownload']
for root,dirs,files in os.walk(inputPath):
for f in os.listdir(root):
if f.startswith('.'):
# guard to make sure not to delete files accidentally -
# checks for .git/gitignore and Drive hidden files; can add to list!!
if not any(x in f for x in dont_remove):
target = os.path.join(root,f)
os.remove(target)
removed.append(target)
print("removed a system file at {}".format(target))
for _dir in dirs:
for f in os.listdir(os.path.join(root,_dir)):
if f.startswith('.'):
if not any(x in f for x in dont_remove):
target = os.path.join(root,_dir,f)
removed.append(target)
os.remove(target)
print("removed a system file at {}".format(target))
return removed
def get_desktop():
desktop = os.path.expanduser("~/Desktop")
return desktop
def get_filesystem_id(path):
'''
input a path and return the filesystem id
* use to compare filesystem identities for `mv` vs `rsync`
when running moveNcopy
'''
fs_id = os.stat(path).st_dev
return fs_id
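# Sketch of the intended mv-vs-rsync decision (paths are hypothetical):
# matching st_dev values mean source and destination live on the same
# filesystem, so a cheap rename/mv works; otherwise copy with rsync.
#
#   if get_filesystem_id('/mnt/ingest/a.mov') == get_filesystem_id('/mnt/ingest/out'):
#       pass  # same filesystem: mv
#   else:
#       pass  # crossing filesystems: rsync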
#
# SYSTEM / ENVIRONMENT STUFF
#
################################################################
today = str(date.today())
now = timestamp('now')
iso8601 = timestamp('iso8601')
pymmConfig = read_config()
# pymmLogDir = pymmConfig['logging']['pymm_log_dir']
# pymmLogPath = os.path.join(pymmLogDir,'pymm_log.txt')
| null |
pymmFunctions.py
|
pymmFunctions.py
|
py
| 27,072 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.dirname",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "configparser.SafeConfigParser",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "shutil.move",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "loggers.end_log",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "dbAccess.DB",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 322,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 356,
"usage_type": "attribute"
},
{
"api_name": "directoryScanner.main",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 438,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 440,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 453,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 467,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 483,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 484,
"usage_type": "attribute"
},
{
"api_name": "Levenshtein.median",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "Levenshtein.distance",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 526,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 529,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 532,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 532,
"usage_type": "attribute"
},
{
"api_name": "hashlib.sha256",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 555,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 555,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 556,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 556,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 557,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 557,
"usage_type": "attribute"
},
{
"api_name": "os.rename",
"line_number": 559,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 583,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 583,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 627,
"usage_type": "attribute"
},
{
"api_name": "directoryScanner.main",
"line_number": 657,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 658,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 688,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 693,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 721,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 723,
"usage_type": "attribute"
},
{
"api_name": "subprocess.PIPE",
"line_number": 724,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 747,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 749,
"usage_type": "attribute"
},
{
"api_name": "subprocess.PIPE",
"line_number": 750,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 791,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 793,
"usage_type": "attribute"
},
{
"api_name": "subprocess.PIPE",
"line_number": 794,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 797,
"usage_type": "call"
},
{
"api_name": "sys.platform.startswith",
"line_number": 836,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 836,
"usage_type": "attribute"
},
{
"api_name": "sys.platform.startswith",
"line_number": 838,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 838,
"usage_type": "attribute"
},
{
"api_name": "sys.platform.startswith",
"line_number": 840,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 840,
"usage_type": "attribute"
},
{
"api_name": "platform.uname",
"line_number": 846,
"usage_type": "call"
},
{
"api_name": "platform.uname",
"line_number": 858,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 866,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 868,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 868,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 870,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 872,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 875,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 875,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 878,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 879,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 879,
"usage_type": "attribute"
},
{
"api_name": "subprocess.check_output",
"line_number": 883,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 888,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 894,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 894,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 894,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 898,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 902,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 902,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 903,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 903,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 904,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 904,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 905,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 905,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 937,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 937,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 943,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 943,
"usage_type": "attribute"
},
{
"api_name": "os.chmod",
"line_number": 952,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 954,
"usage_type": "call"
},
{
"api_name": "os.chmod",
"line_number": 957,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 957,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 957,
"usage_type": "attribute"
},
{
"api_name": "os.chmod",
"line_number": 964,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 964,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 964,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 977,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 978,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 989,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 989,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 990,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 994,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 994,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 994,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 1002,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1002,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 1004,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 1010,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1010,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 1019,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 1027,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 1027,
"usage_type": "name"
}
] |
360756884
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (C) 2007-2012 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Django modules
from django.db import models
# Third-party modules
from south.db import db
class Migration:
def forwards(self):
db.delete_table('sa_task')
def backwards(self):
# Model 'Task'
db.create_table('sa_task', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('task_id', models.IntegerField("Task",unique=True)),
('start_time', models.DateTimeField("Start Time",auto_now_add=True)),
('end_time', models.DateTimeField("End Time")),
('profile_name', models.CharField("Profile",max_length=64)),
('stream_url', models.CharField("Stream URL",max_length=128)),
('action', models.CharField("Action",max_length=64)),
('args', models.TextField("Args")),
('status', models.CharField("Status",max_length=1,choices=[("n","New"),("p","In Progress"),("f","Failure"),("c","Complete")])),
('out', models.TextField("Out"))
))
db.send_create_signal('sa', ['Task'])
| null |
sa/migrations/0004_no_task.py
|
0004_no_task.py
|
py
| 1,302 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "south.db.db.delete_table",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "south.db.db",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "south.db.db.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "south.db.db",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "south.db.db.send_create_signal",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "south.db.db",
"line_number": 34,
"usage_type": "name"
}
] |
141517354
|
# -*- coding: utf-8 -*-
from time import sleep
import sys
import random
import cloud4rpi
import psutil
import rpi
import os
import re
from statistics import mean
pattern = re.compile(r"^cpu[0-9]?-thermal$")
#import RPi.GPIO as GPIO # pylint: disable=F0401
# Put your device token here. To get the token,
# sign up at https://cloud4rpi.io and create a device.
DEVICE_TOKEN = os.environ['TOKEN']
# Constants
DATA_SENDING_INTERVAL = 30 # secs
DIAG_SENDING_INTERVAL = 60 # secs
POLL_INTERVAL = 0.5 # 500 ms
# Configure GPIO library
#GPIO.setmode(GPIO.BOARD)
#GPIO.setup(LED_PIN, GPIO.OUT)
# Handler for the button or switch variable
def led_control(value=None):
# GPIO.output(LED_PIN, value)
# return GPIO.input(LED_PIN)
pass
def cpu_percent(value=None):
return psutil.cpu_percent(interval=1)
def cpu_freq(value=None):
freq_tuple = psutil.cpu_freq()
return freq_tuple.current
def mem_percent(value=None):
mem = psutil.virtual_memory()
return mem.percent
def swap_percent(value=None):
swap = psutil.swap_memory()
return swap.percent
def disk_percent(value=None):
disk = psutil.disk_usage('/')
return disk.percent
def cpu_temp(value=None):
temps = psutil.sensors_temperatures()
temp_values_arr = []
for temp_key, temp_val in temps.items():
if (pattern.match(temp_key)):
temp_values_arr.append(temp_val[0].current)
return mean(temp_values_arr)
def main():
variables = {
# 'LED On': {
# 'type': 'bool',
# 'value': False,
# 'bind': led_control
# },
'CPU Temp': {
'type': 'numeric',
'bind': cpu_temp
},
'CPU %': {
'type' : 'numeric',
'bind' : cpu_percent
},
'CPU freq' : {
'type' : 'numeric',
'bind' : cpu_freq
},
'Memory Usage %': {
'type' : 'numeric',
'bind' : mem_percent
},
'Swap Usage %': {
'type' : 'numeric',
'bind' : swap_percent
},
'Disk Usage %': {
'type' : 'numeric',
'bind' : disk_percent
}
}
diagnostics = {
'IP Address': rpi.ip_address,
'Host': rpi.host_name,
'Operating System': rpi.os_name
}
device = cloud4rpi.connect(DEVICE_TOKEN)
# Use the following 'device' declaration
# to enable the MQTT traffic encryption (TLS).
#
# tls = {
# 'ca_certs': '/etc/ssl/certs/ca-certificates.crt'
# }
# device = cloud4rpi.connect(DEVICE_TOKEN, tls_config=tls)
try:
device.declare(variables)
device.declare_diag(diagnostics)
device.publish_config()
# Adds a 1 second delay to ensure device variables are created
sleep(1)
data_timer = 0
diag_timer = 0
while True:
if data_timer <= 0:
device.publish_data()
data_timer = DATA_SENDING_INTERVAL
if diag_timer <= 0:
device.publish_diag()
diag_timer = DIAG_SENDING_INTERVAL
sleep(POLL_INTERVAL)
diag_timer -= POLL_INTERVAL
data_timer -= POLL_INTERVAL
except KeyboardInterrupt:
cloud4rpi.log.info('Keyboard interrupt received. Stopping...')
except Exception as e:
error = cloud4rpi.get_error_message(e)
cloud4rpi.log.exception("ERROR! %s %s", error, sys.exc_info()[0])
finally:
sys.exit(0)
if __name__ == '__main__':
main()
| null |
cloud4rpi_monitor.py
|
cloud4rpi_monitor.py
|
py
| 3,774 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "re.compile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "psutil.cpu_percent",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "psutil.cpu_freq",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "psutil.virtual_memory",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "psutil.swap_memory",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "psutil.disk_usage",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "psutil.sensors_temperatures",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "rpi.ip_address",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "rpi.host_name",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "rpi.os_name",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "cloud4rpi.connect",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "cloud4rpi.log.info",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "cloud4rpi.log",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "cloud4rpi.get_error_message",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "cloud4rpi.log.exception",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "cloud4rpi.log",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "sys.exc_info",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 149,
"usage_type": "call"
}
] |
444973323
|
#libraries
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
#import pandas as pd
def plot_stacked_bar(bars1, bars2, names, file_name):
# y-axis in bold
rc('font', weight='bold')
# Values of each group
# bars1 = [12, 28, 1, 8, 22]
# bars2 = [28, 7, 16, 4, 10]
# bars3 = [25, 3, 23, 25, 17]
# Heights of bars1 + bars2
bars = np.add(bars1, bars2).tolist()
# The position of the bars on the x-axis
#r = [0,1,2,3,4]
#x = len(bars1)
r = list(range(len(bars1)))
# Names of group and bar width
#names = ['A','B','C','D','E']
barWidth = 1
# Create brown bars
plt.bar(r, bars1, color='#7f6d5f', edgecolor='white', width=barWidth, label= 'Academy Students')
# Create green bars (middle), on top of the first ones
plt.bar(r, bars2, bottom=bars1, color='#557f2d', edgecolor='white', width=barWidth, label='External')
# Create green bars (top)
#plt.bar(r, bars3, bottom=bars, color='#2d7f5e', edgecolor='white', width=barWidth)
# Create legend
plt.legend()
# Custom X axis
plt.xticks(r, names, fontweight='normal', rotation=90)
#plt.xticks(r, names, rotation=90)
plt.xlabel("Zoom Meeting Date")
plt.subplots_adjust(bottom=0.25, top=0.96)
# Show graphic
#plt.show()
plt.savefig(file_name)
def display_image(filename):
img = Image.open(filename)
img.show()
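# Hedged usage sketch for plot_stacked_bar (all values are invented): bars1
# and bars2 must be equal-length numeric lists, and names labels each bar.
#
#   students = [12, 28, 1, 8, 22]
#   external = [28, 7, 16, 4, 10]
#   dates = ['2020-04-01', '2020-04-08', '2020-04-15', '2020-04-22', '2020-04-29']
#   plot_stacked_bar(students, external, dates, 'attendance.png')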
| null |
Zoom/graph.py
|
graph.py
|
py
| 1,449 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.rc",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.add",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 52,
"usage_type": "name"
}
] |
130524939
|
'''
Copyright (C) 2018 Busacca Davide
This file is part of PV.
PV is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation (FSF), either version 3 of the License, or (at your
option) any later version.
PV is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the Affero GNU General Public License
version 3 along with PV. If not, see http://www.gnu.org/licenses/
'''
import os, sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
import STFT
import utils as U
def ISTFT(X, frameSize = 4096, hopSize= 2048, fftshift = True, windowType='hann', zeroPadding=0, normalized=False):
'''
Inverse Short Time Fourier Transform.
ISTFT using Least Squared Error Estimation (LSEE) for modified spectrograms:
each frame is inverted with the IFFT, un-zero-phased, re-windowed, and
overlap-added, then normalized by the overlap-added squared windows.
'''
#normalization commented because apparently useless
#Creating window
w = U.windowing(np.ones(frameSize), frameSize, typeWindow=windowType)
#Computing squared windows for LSEE
window = np.power(w, 2)
#Computing IFFT, appending frames and windows
i=0
for frame_X in X:
x_t = np.real(ifft(frame_X))
x_t = U.zeroPhasing(x_t, frameSize, zeroPadding=zeroPadding, fftshift=fftshift, inverse=True)
x_w = x_t*w
if i==0:
i = 1
x_f = x_w
ow_f = window
else:
x_f = np.append(x_f, x_w)
ow_f = np.append(ow_f, window)
#The TSM Toolbox here has a "restore energy" part.
#Overlapping and adding frames and windows
x = U.myOverlapAdd(x_f, frameSize, hopSize)
ow = U.myOverlapAdd(ow_f, frameSize, hopSize)
ow[np.where(ow<0.001)] = 1 #avoid division by 0
#Apparently the normalization is not needed
#Normalization
#if normalized:
# x = x/(frameSize+zeroPadding)
#Least Squares Error Estimation
x = x/(ow + np.finfo(float).eps) #LSEE (avoiding division by zero)
#Deleting half frame at the beginning and half at the end
#Not used because it introduces artefacts in the magnitude spectrogram
#x = x[int(frameSize/2):int(-frameSize/2)]
return x, ow
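# For reference, my reading of the synthesis above in LSEE form (notation is
# mine, not from the source): with analysis window w, hop size H, and x_m the
# IFFT of frame m,
#
#   x(n) = \frac{\sum_m w(n - mH)\, x_m(n)}{\sum_m w^2(n - mH) + \epsilon}
#
# The numerator is the overlap-add of the re-windowed frames, the denominator
# is the `ow` array returned above, and \epsilon avoids division by zero.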
def callback(nameInput='../sounds/sine.wav', nameOutput='processed/sine_STFT.wav', frameSize=3071, zeroPadding=1025, hopSize=256, windowType='hann', fftshift=True):
normalized=False #parameter not considered in the ISTFT function
#Loading audio
x, fs = U.wavread(nameInput)
#Computing STFT
X = STFT.STFT(x, frameSize, hopSize, fftshift, windowType, zeroPadding)
#Computing ISTFT
[y, ow] = ISTFT(X, frameSize=frameSize, hopSize=hopSize, fftshift=fftshift, windowType=windowType, normalized=normalized, zeroPadding=zeroPadding)
#Writing audio output
U.wavwrite(y, fs, nameOutput)
#Plotting
fig = plt.figure(figsize=(15, 15), dpi= 80, facecolor='w', edgecolor='k')
fig.canvas.set_window_title('Signals and Window')
tmp = fig.add_subplot(3,1,1)
tmp.plot(U.getTimeAxis(x, fs), x)
tmp.set_title('Original Signal')
tmp = fig.add_subplot(3,1,2)
tmp.plot(U.getTimeAxis(y, fs), y)
tmp.set_title('Re-Synthesized Signal')
tmp = fig.add_subplot(3,1,3)
tmp.plot(U.getTimeAxis(ow, fs), ow)
tmp.set_title('Sum of (Squared) Windows')
[tx, fx] = U.getSpectrogramAxis(X, fs, hopSize)
endX = int(X.shape[1]/2+1)
fig2 = plt.figure(figsize=(15, 15), dpi= 80, facecolor='w', edgecolor='k')
fig2.canvas.set_window_title('Spectrograms')
tmp = fig2.add_subplot(2,1,1)
tmp.pcolormesh(tx, fx, np.transpose(U.amp2db(U.getMagnitude(X[:, :endX]))))
tmp.set_title('Original Magnitude Spectrogram')
tmp = fig2.add_subplot(2,1,2)
plt.pcolormesh(tx, fx, np.transpose(np.diff(U.getPhase(X[:, :endX]))))
tmp.set_title('Differential of the Original Phase Spectrogram')
plt.show()
# Evaluating the difference between input and re-synthesized signals
print("The sum of the differences between the original signal and the resynthsized using the STFT is: " + str(U.distance2signals(x, y, frameSize)) )
if __name__ == "__main__":
callback()
| null |
spectrogram/ISTFT.py
|
ISTFT.py
|
py
| 4,546 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "utils.windowing",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack.ifft",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "utils.zeroPhasing",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "utils.myOverlapAdd",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "utils.myOverlapAdd",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.finfo",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "utils.wavread",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "STFT.STFT",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "utils.wavwrite",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "utils.getTimeAxis",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "utils.getTimeAxis",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "utils.getTimeAxis",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "utils.getSpectrogramAxis",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "numpy.transpose",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "utils.amp2db",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "utils.getMagnitude",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pcolormesh",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "numpy.transpose",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "utils.getPhase",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "utils.distance2signals",
"line_number": 121,
"usage_type": "call"
}
] |
372976317
|
# Main purpose: load the IMDB dataset by handling the npz file locally,
# because Keras's built-in remote download could not fetch it.
# (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features)
# input_file = "D:\用户目录\Desktop\郭磊\keras\imdb.npz"
from __future__ import absolute_import
from six.moves import zip
import numpy as np
import json
import warnings
# The function below is built into keras' sequence module, but since it could not be imported it is copied out here.
def _remove_long_seq(maxlen, seq, label):
# Remove sequences longer than the given maximum length; returns the sequences and labels.
"""Removes sequences that exceed the maximum length.
# Arguments
maxlen: int, maximum length
seq: list of lists where each sublist is a sequence
label: list where each element is an integer
# Returns
new_seq, new_label: shortened lists for `seq` and `label`.
"""
new_seq, new_label = [], []
for x, y in zip(seq, label):
if len(x) < maxlen:
new_seq.append(x)
new_label.append(y)
return new_seq, new_label
# get_file_path is the path to the file; originally this was a keras helper function.
def load_data(get_file_path, num_words=None, skip_top=0,
maxlen=None, seed=113,
start_char=1, oov_char=2, index_from=3, **kwargs):
# Legacy support
# "out of vocabulary" (OOV): words outside the kept vocabulary
if 'nb_words' in kwargs:
warnings.warn('The `nb_words` argument in `load_data` '
'has been renamed `num_words`.')
num_words = kwargs.pop('nb_words')
# pop() removes an element from a list (the last one by default) and returns its value.
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
path = get_file_path  # the local path to imdb.npz; get_file_path was originally a keras helper function for remote access
with np.load(path) as f:
x_train, labels_train = f['x_train'], f['y_train']
x_test, labels_test = f['x_test'], f['y_test']
np.random.seed(seed)
# seed() sets the integer that the random number generator starts from:
# with the same seed the same sequence of random numbers is produced each
# time; if it is not set, the system derives a value from the current time,
# so the numbers differ from run to run.
np.random.shuffle(x_train) # shuffle using the same seed
np.random.seed(seed)
np.random.shuffle(labels_train) # shuffle using the same seed
# shuffle the test set
np.random.seed(seed * 2)
np.random.shuffle(x_test)
np.random.seed(seed * 2)
np.random.shuffle(labels_test)
xs = np.concatenate([x_train, x_test])  # join the feature sequences of the train and test sets
labels = np.concatenate([labels_train, labels_test])  # join the labels of the train and test sets
if start_char is not None:
xs = [[start_char] + [w + index_from for w in x] for x in xs]
elif index_from:
xs = [[w + index_from for w in x] for x in xs]
if maxlen:
xs, labels = _remove_long_seq(maxlen, xs, labels)
if not xs:
raise ValueError('After filtering for sequences shorter than maxlen=' +
str(maxlen) + ', no sequence was kept. '
'Increase maxlen.')
if not num_words:
num_words = max([max(x) for x in xs])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters:
# 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
xs = [[w if (skip_top <= w < num_words) else oov_char for w in x] for x in xs]
else:
xs = [[w for w in x if (skip_top <= w < num_words)] for x in xs]
idx = len(x_train)
x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])
# what is finally trained on is also of array type
x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:])
return (x_train, y_train), (x_test, y_test)
def get_word_index(get_file_path):
path = get_file_path
f = open(path)
data = json.load(f)
f.close()
return data
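# Minimal usage sketch (the local paths below are hypothetical stand-ins for
# wherever imdb.npz and the word-index JSON were downloaded):
#
#   (x_train, y_train), (x_test, y_test) = load_data(
#       r"D:\data\imdb.npz", num_words=10000, maxlen=500)
#   word_index = get_word_index(r"D:\data\imdb_word_index.json")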
| null |
imdb.py
|
imdb.py
|
py
| 4,179 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "six.moves.zip",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 99,
"usage_type": "call"
}
] |
89472880
|
from pytorch_rrt import UniformActionSpace, ActionDescription, \
UniformStateSpace, State, StateDescription, \
KinodynamicRRT, Visualizer
from typing import Iterable
import torch
# from window_recorder.recorder import WindowRecorder
import matplotlib.pyplot as plt
class TwoDActionSpace(UniformActionSpace):
MAX_ACTION = 0.3
@classmethod
def description(cls) -> Iterable[ActionDescription]:
return [ActionDescription("dx", -cls.MAX_ACTION, cls.MAX_ACTION),
ActionDescription("dy", -cls.MAX_ACTION, cls.MAX_ACTION)]
class TwoDStateSpace(UniformStateSpace):
MAX_STATE = 3
@classmethod
def description(cls) -> Iterable[StateDescription]:
return [StateDescription("x", -cls.MAX_STATE, cls.MAX_STATE),
StateDescription("y", -cls.MAX_STATE, cls.MAX_STATE)]
def distance(self, s1: State, s2: State) -> torch.tensor:
return (s1 - s2).view(-1, self.dim()).norm(dim=1)
class TwoDVisualizer(Visualizer):
def draw_state(self, x: State, color='k', s=4, alpha=0.2):
x = x.cpu().numpy()
plt.scatter(x[0], x[1], color=color, s=s, alpha=alpha)
# plt.pause(0.0001)
def draw_connect(self, x_start: State, x_next: State):
self.draw_state(x_next)
plt.plot([x_start[0].cpu().numpy(), x_next[0].cpu().numpy()],
[x_start[1].cpu().numpy(), x_next[1].cpu().numpy()], color='gray', linewidth=1, alpha=0.2)
plt.pause(0.0001)
state_space = TwoDStateSpace()
action_space = TwoDActionSpace()
def true_dynamics(state, action, environment=None):
return state + action
# try different true dynamics than given approximate dynamics
dynamics = true_dynamics
def traj_cost(trajectory, goal):
states = torch.stack(trajectory.states)
d = state_space.distance(states, goal)
return d.min()
rrt = KinodynamicRRT(state_space, action_space, dynamics, traj_cost)
goal = state_space.sample((1,)).view(-1)
state = state_space.sample((1,)).view(-1)
def goal_check(trajectory):
states = torch.stack(trajectory.states)
d = state_space.distance(states, goal)
return d.min() < 0.1
vis = TwoDVisualizer()
plt.ion()
plt.figure()
plt.xlim([-4, 4])
plt.ylim([-4, 4])
plt.axis("equal")
vis.draw_state(state, color='k', s=20, alpha=1)
vis.draw_state(goal, color='g', s=20, alpha=1)
plt.draw()
# use RRT in MPC manner, re-plan after each action
while True:
res = rrt.plan(state, goal_check, goal=goal, visualizer=vis)
action = res.trajectory.actions[0]
# step in environment
next_state = true_dynamics(state, action)
state = next_state
vis.draw_state(state, color='k', s=8, alpha=1)
plt.draw()
if state_space.distance(state, goal) < 0.1:
print("done planning state: {} goal: {}".format(state, goal))
break
input('enter to finish')
| null |
tests/twod.py
|
twod.py
|
py
| 2,845 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pytorch_rrt.UniformActionSpace",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pytorch_rrt.ActionDescription",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytorch_rrt.ActionDescription",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "typing.Iterable",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pytorch_rrt.ActionDescription",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pytorch_rrt.UniformStateSpace",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pytorch_rrt.StateDescription",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pytorch_rrt.StateDescription",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "typing.Iterable",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pytorch_rrt.StateDescription",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pytorch_rrt.State",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pytorch_rrt.Visualizer",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "pytorch_rrt.State",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pytorch_rrt.State",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pause",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "torch.stack",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pytorch_rrt.KinodynamicRRT",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.draw",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.draw",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
}
] |
282892793
|
# vi: set shiftwidth=4 tabstop=4 expandtab:
import datetime
import re
import itertools
def get_program_from_file(file_path="day14_input.txt"):
with open(file_path) as f:
return [l.strip() for l in f]
# mask = X100110110X011000101000101XX11001X11
mask_re = re.compile(r"^mask = (?P<mask>[01X]+)$")
# mem[5201] = 1838761
mem_re = re.compile(r"^mem\[(?P<addr>\d+)\] = (?P<value>\d+)")
def apply_mask(value, mask):
value = format(value, "0%db" % len(mask))
bits = [bv if bm == "X" else bm for bv, bm in zip(value, mask)]
return int("".join(bits), 2)
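# Worked example, taken from the assertions in run_tests below: mask bits 0/1
# overwrite the corresponding value bit, X leaves it unchanged.
#
#   apply_mask(11, "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")  # -> 73
#   # value 11 -> ...000001011; forcing bit 6 on and bit 1 off gives
#   # ...001001001 -> 73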
def all_combinations(lst):
for l in range(len(lst) + 1):
for comb in itertools.combinations(lst, l):
yield comb
def apply_mask2(value, mask):
value = format(value, "0%db" % len(mask))
floating = []
base_val = 0
for i, (bval, bmask) in enumerate(reversed(list(zip(value, mask)))):
val = 2 ** i
if bmask == "X":
floating.append(val)
elif int(bmask) or int(bval):
base_val += val
return [base_val + sum(comb) for comb in all_combinations(floating)]
def run_program(program):
mask = ""
memory = dict()
for line in program:
m_mask = mask_re.fullmatch(line)
if m_mask is not None:
d = m_mask.groupdict()
mask = d["mask"]
else:
m_mem = mem_re.fullmatch(line)
if m_mem is not None:
d = m_mem.groupdict()
memory[int(d["addr"])] = apply_mask(int(d["value"]), mask)
else:
raise ValueError(line)
return sum(v for v in memory.values())
def run_program2(program):
mask = ""
memory = dict()
for line in program:
m_mask = mask_re.fullmatch(line)
if m_mask is not None:
d = m_mask.groupdict()
mask = d["mask"]
else:
m_mem = mem_re.fullmatch(line)
if m_mem is not None:
d = m_mem.groupdict()
v = int(d["value"])
for addr in apply_mask2(int(d["addr"]), mask):
memory[addr] = v
else:
raise ValueError(line)
return sum(v for v in memory.values())
def run_tests():
example1 = [
"mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X",
"mem[8] = 11",
"mem[7] = 101",
"mem[8] = 0",
]
assert apply_mask(11, "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X") == 73
assert apply_mask(101, "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X") == 101
assert apply_mask(0, "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X") == 64
assert run_program(example1) == 165
example2 = [
"mask = 000000000000000000000000000000X1001X",
"mem[42] = 100",
"mask = 00000000000000000000000000000000X0XX",
"mem[26] = 1",
]
assert sorted(apply_mask2(42, "000000000000000000000000000000X1001X")) == [
26,
27,
58,
59,
]
assert sorted(apply_mask2(26, "00000000000000000000000000000000X0XX")) == [
16,
17,
18,
19,
24,
25,
26,
27,
]
assert run_program2(example2) == 208
def get_solutions():
program = get_program_from_file()
print(run_program(program) == 6559449933360)
print(run_program2(program) == 3369767240513)
if __name__ == "__main__":
begin = datetime.datetime.now()
run_tests()
get_solutions()
end = datetime.datetime.now()
print(end - begin)
| null |
day14.py
|
day14.py
|
py
| 3,513 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 127,
"usage_type": "attribute"
}
] |
175701350
|
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Fred Callaway
# Copyright (c) 2015 Fred Callaway
# Copyright (c) 2017 FichteFoll <[email protected]>
#
# License: MIT
#
"""This module exports the Mypy plugin class."""
import logging
import os
import re
import shutil
import tempfile
import getpass
from SublimeLinter.lint import const
from SublimeLinter.lint import PythonLinter
USER = getpass.getuser()
TMPDIR_PREFIX = "SublimeLinter-contrib-mypy-%s" % USER
logger = logging.getLogger("SublimeLinter.plugin.mypy")
# Mapping for our created temporary directories.
# For smarter caching purposes,
# we index different cache folders based on the working dir.
tmpdirs = {}
class Mypy(PythonLinter):
"""Provides an interface to mypy."""
executable = "mypy"
regex = r'^[^:]+:(?P<line>\d+):((?P<col>\d+):)?\s*((?P<error>error)|(?P<warning>warning)):\s*(?P<message>.+)'
line_col_base = (1, 1)
tempfile_suffix = 'py'
default_type = const.WARNING
# Pretty much all interesting options don't expect a value,
# so you'll have to specify those in "args" anyway.
# This dict only contains settings for which we have special handling.
defaults = {
'selector': "source.python",
# Will default to tempfile.TemporaryDirectory if empty.
"--cache-dir:": "",
# Allow users to disable this
"--incremental": True,
# Need this to silence lints for other files. Alternatively: 'skip'
"--follow-imports:": "silent",
}
def run(self, *args):
# Column numbers were 0-based before version 0.570
version = self._get_version()
if version < (0, 520):
# abort lint
raise RuntimeError("mypy linter plugin requires at least version 0.520")
if version < (0, 570):
self.line_col_base = (1, 0)
else:
self.line_col_base = (1, 1)
return super().run(*args)
def cmd(self):
"""Return a list with the command line to execute."""
cmd = [
self.executable,
'${args}',
'--show-column-numbers',
'--hide-error-context',
# '--incremental',
]
if self.filename:
cmd.extend([
# --shadow-file SOURCE_FILE SHADOW_FILE
#
# '@' needs to be the (temporary) shadow file,
# while we request the normal filename
# to be checked in its normal environment.
'--shadow-file', '${file}', '${temp_file}',
# The file we want to lint on the surface
'${file}',
])
else:
cmd.append('${temp_file}')
# Add a temporary cache dir to the command if none was specified.
# Helps keep the environment clean
# by not littering everything with `.mypy_cache` folders.
settings = self.get_view_settings()
if not settings.get('cache-dir'):
cwd = os.getcwd()
if cwd in tmpdirs:
cache_dir = tmpdirs[cwd].name
else:
tmp_dir = tempfile.TemporaryDirectory(prefix=TMPDIR_PREFIX)
tmpdirs[cwd] = tmp_dir
cache_dir = tmp_dir.name
logger.info("Created temporary cache dir at: %s", cache_dir)
cmd[1:1] = ["--cache-dir", cache_dir]
return cmd
def _get_version(self):
"""Determine the linter's version by command invocation."""
success, cmd = self.context_sensitive_executable_path(self.executable)
if isinstance(cmd, str):
cmd = [cmd]
cmd.append('--version')
output = self.communicate(cmd)
match = re.search(r"(\d+)\.(\d+)(?:\.(\d+))?", output)
if not match:
logger.info("failed to determine mypy version. output:\n%s", output)
return ()
version = tuple(int(g) for g in match.groups() if g)
logger.info("mypy version: %s", version)
return version
def _cleanup_tmpdirs():
def _onerror(function, path, exc_info):
logger.exception("mypy: Unable to delete '%s' while cleaning up temporary directory", path,
exc_info=exc_info)
tmpdir = tempfile.gettempdir()
for dirname in os.listdir(tmpdir):
if dirname.startswith(TMPDIR_PREFIX):
shutil.rmtree(os.path.join(tmpdir, dirname), onerror=_onerror)
def plugin_loaded():
"""Attempt to clean up temporary directories from previous runs."""
_cleanup_tmpdirs()
def plugin_unloaded():
"""Clear references to TemporaryDirectory instances.
They should then be removed automatically.
"""
# (Actually, do we even need to do this?)
tmpdirs.clear()
| null |
linter.py
|
linter.py
|
py
| 4,822 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "getpass.getuser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "SublimeLinter.lint.PythonLinter",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "SublimeLinter.lint.const.WARNING",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "SublimeLinter.lint.const",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "tempfile.gettempdir",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 132,
"usage_type": "attribute"
}
] |
146572208
|
import asyncio
from contextlib import suppress
from copy import copy
from io import BytesIO
import discord
from redbot.core import commands, checks, Config
from redbot.core.utils import common_filters, mod
from redbot.core.utils.chat_formatting import pagify, humanize_list
from redbot.core.i18n import Translator, cog_i18n
check_permissions = getattr(mod, "check_permissions", checks.check_permissions)
from .converter import RiftConverter, search_converter
Cog = getattr(commands, "Cog", object)
listener = getattr(Cog, "listener", lambda: lambda x: x)
_ = Translator("Rift", __file__)
max_size = 8_000_000 # can be 1 << 23 but some unknowns also add to the size
async def close_check(ctx):
"""Admin / manage channel OR private channel"""
if isinstance(ctx.channel, discord.DMChannel):
return True
return await mod.is_admin_or_superior(ctx.bot, ctx.author) or await check_permissions(
ctx, {"manage_channels": True}
)
class RiftError(Exception):
pass
class Rift(Cog):
"""
Communicate with other servers/channels.
"""
def __init__(self, bot):
super().__init__()
self.bot = bot
self.open_rifts = {}
self.config = Config.get_conf(self, identifier=2_113_674_295, force_registration=True)
self.config.register_channel(blacklisted=False)
self.config.register_guild(blacklisted=False)
self.config.register_user(blacklisted=False)
self.config.register_global(notify=True)
# COMMANDS
@commands.group()
async def rift(self, ctx):
"""
Communicate with other channels through Red.
"""
pass
@rift.group()
async def blacklist(self, ctx):
"""
Configures blacklists.
Blacklisted destinations cannot have rifts opened to them.
"""
pass
@blacklist.command(name="channel")
@commands.check(close_check)
async def blacklist_channel(self, ctx, *, channel: discord.TextChannel = None):
"""
Blacklists the current channel or the specified channel.
Can also blacklist DM channels.
"""
if channel and isinstance(ctx.channel, discord.DMChannel):
raise commands.BadArgument(_("You cannot blacklist a channel in DMs."))
if isinstance(ctx.channel, discord.DMChannel):
channel = ctx.author
group = self.config.user(channel)
else:
channel = channel or ctx.channel
group = self.config.channel(channel)
blacklisted = not await group.blacklisted()
await group.blacklisted.set(blacklisted)
await ctx.maybe_send_embed(
_("Channel is {} blacklisted.".format("now" if blacklisted else "no longer"))
)
if blacklisted:
await self.close_rifts(ctx, ctx.author, channel)
@blacklist.command(name="server")
@commands.guild_only()
@checks.admin_or_permissions(manage_guild=True)
async def blacklist_server(self, ctx):
"""
Blacklists the current server.
All channels and members in a server are considered blacklisted if the server is blacklisted.
Members can still be reached if they are in another, non-blacklisted server.
"""
group = self.config.guild(ctx.guild)
blacklisted = not await group.blacklisted()
await group.blacklisted.set(blacklisted)
await ctx.maybe_send_embed(
_("Server is {} blacklisted.".format("now" if blacklisted else "no longer"))
)
if blacklisted:
await self.close_rifts(ctx, ctx.author, ctx.guild)
@rift.command(name="close")
@commands.check(close_check)
async def rift_close(self, ctx):
"""
Closes all rifts that lead to this channel.
"""
channel = ctx.author if isinstance(ctx.channel, discord.DMChannel) else ctx.channel
await self.close_rifts(ctx, ctx.author, channel)
@rift.command(name="notify")
@checks.is_owner()
async def rift_notify(self, ctx, *, notify: bool = None):
"""
Toggle whether the bot notifies the destination of an open rift.
The notification is only disabled for bot owners, and
will still notify channels the bot owner doesn't have direct access to.
"""
if notify is None:
notify = not await self.config.notify()
await self.config.notify.set(notify)
await ctx.send(
_(
"I will {} notify destinations when you open new rifts.".format(
"now" if notify else "no longer"
)
)
)
@rift.command(name="open")
async def rift_open(self, ctx, *rifts: RiftConverter(_, globally=True)):
"""
Opens a rift to the specified destination.
The destination may be any channel or user that both you and the bot are connected to, even across servers.
"""
if not rifts:
return await ctx.send_help()
rifts = set(rifts)
no_notify = await self.bot.is_owner(ctx.author) and not await self.config.notify()
for rift in rifts:
if no_notify and isinstance(rift.destination, discord.abc.GuildChannel):
mem = rift.destination.guild.get_member(ctx.author.id)
if mem and rift.destination.permissions_for(mem).read_messages:
notify = False
else:
notify = True
else:
notify = True
self.open_rifts[rift] = {"notify": notify}
if notify:
ctx.bot.loop.create_task(
rift.destination.send(_("{} has opened a rift to here.").format(rift.author))
)
await ctx.send(
_(
"A rift has been opened to {}! Everything you say will be relayed there.\nResponses will be relayed here.\nType `exit` to quit."
).format(humanize_list([str(rift.destination) for rift in rifts]))
)
@rift.command(name="search")
async def rift_search(self, ctx, searchby: search_converter(_) = None, *, search=None):
"""
Searches through open rifts.
searchby: author, source, or destination. If this isn't provided, all
three are searched through.
search: Search for the specified author/source/destination. If this
isn't provided, the author or channel of the command is used.
"""
searchby = searchby or list(range(3))
if search is None:
search = [ctx.author, ctx.channel, ctx.author]
else:
search = await RiftConverter.search(ctx, search, False, _)
results = set()
for rift in self.open_rifts:
for i in searchby:
if rift[i] in search:
results.add(rift)
if not results:
return await ctx.maybe_send_embed(_("No rifts were found with these parameters."))
message = _("Results:") + "\n\n"
message += "\n".join(str(rift) for rift in results)
for page in pagify(message):
await ctx.maybe_send_embed(page)
# UTILITIES
async def close_rifts(self, ctx, closer, destination):
if isinstance(destination, discord.Guild):
check = lambda rift: rift.destination in destination.channels
else:
check = lambda rift: rift.destination == destination
noclose = True
for rift in self.open_rifts.copy():
if check(rift):
del self.open_rifts[rift]
noclose = False
await rift.source.send(
_("{} has closed the rift to {}.").format(closer, rift.destination)
)
await rift.destination.send(_("Rift from {} closed.").format(rift.source))
if noclose:
await ctx.send(_("No rifts were found that connect to here."))
async def get_embed(self, destination, attachments):
attach = attachments[0]
if (
hasattr(destination, "guild")
and await self.bot.db.guild(destination.guild).use_bot_color()
):
color = destination.guild.me.colour
else:
color = self.bot.color
description = "\n\n".join(
f"{self.xbytes(attach.size)}\n**[{attach.filename}]({attach.url})**"
for a in attachments
)
embed = discord.Embed(colour=color, description=description)
embed.set_image(url=attach.url)
return embed
def permissions(self, destination, user, is_owner=False):
if isinstance(destination, discord.User):
return destination.dm_channel.permissions_for(user)
if not is_owner:
member = destination.guild.get_member(user.id)
if member:
return destination.permissions_for(member)
else:
every = destination.guild.default_role
overs = destination.overwrites_for(every)
overs.read_messages = True
overs.send_messages = True
overs = overs.pair()
perms = (every.permissions.value & ~overs[1].value) | overs[0].value
return discord.Permissions(perms)
return discord.Permissions.all()
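# Hedged illustration of the overwrite bit math above: with base permissions
# 0b0110 and an overwrite pair (allow=0b0001, deny=0b0100),
# (0b0110 & ~0b0100) | 0b0001 == 0b0011, i.e. denied bits are cleared first
# and allowed bits are set afterwards.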
async def process_message(self, rift, message, destination):
if isinstance(destination, discord.Message):
send_coro = destination.edit
else:
send_coro = destination.send
channel = (
message.author if isinstance(message.channel, discord.DMChannel) else message.channel
)
send = channel == rift.source
destination = rift.destination if send else rift.source
author = message.author
me = (
destination.dm_channel.me
if isinstance(destination, discord.User)
else destination.guild.me
)
is_owner = await self.bot.is_owner(author)
author_perms = self.permissions(destination, author, is_owner)
bot_perms = self.permissions(destination, me)
content = message.content
if not is_owner:
if not author_perms.administrator:
content = common_filters.filter_invites(content)
if not author_perms.mention_everyone:
content = common_filters.filter_mass_mentions(content)
attachments = message.attachments
files = []
embed = None
if attachments and author_perms.attach_files and bot_perms.attach_files:
overs = await asyncio.gather(*(self.save_attach(file, files) for file in attachments))
overs = list(filter(bool, overs))
if overs:
if bot_perms.embed_links:
embed = await self.get_embed(destination, overs)
else:
content += (
"\n\n"
+ _("Attachments:")
+ "\n"
+ "\n".join(f"({self.xbytes(a.size)}) {a.url}" for a in attachments)
)
if not any((content, files, embed)):
raise RiftError(_("No content to send."))
if not is_owner or not send:
content = f"{author}: {content}"
return await send_coro(content=content, files=files, embed=embed)
async def save_attach(self, file: discord.Attachment, files) -> discord.File:
if file.size > max_size:
return file
buffer = BytesIO()
await file.save(buffer, seek_begin=True)
files.append(discord.File(buffer, file.filename))
return None
def xbytes(self, b):
blist = ("B", "KB", "MB")
index = 0
while True:
if b > 900:
b = b / 1024.0
index += 1
else:
return "{:.3g} {}".format(b, blist[index])
# EVENTS
@listener()
async def on_message(self, m):
if m.author.bot:
return
channel = m.author if isinstance(m.channel, discord.DMChannel) else m.channel
sent = {}
is_command = (await self.bot.get_context(m)).valid
for rift, record in self.open_rifts.copy().items():
if rift.source == channel and rift.author == m.author:
if m.content.lower() == "exit":
processed = self.open_rifts.pop(rift)
if processed["notify"]:
with suppress(discord.HTTPException):
await rift.destination.send(
_("{} has closed the rift.").format(m.author)
)
await channel.send(_("Rift closed."))
else:
if not is_command:
try:
record[m] = await self.process_message(rift, m, rift.destination)
except discord.HTTPException as e:
await channel.send(
_("I couldn't send your message due to an error: {}").format(e)
)
elif rift.destination == channel:
rift_chans = (rift.source, rift.destination)
if rift_chans in sent:
record[m] = sent[rift_chans]
else:
record[m] = sent[rift_chans] = await self.process_message(rift, m, rift.source)
async def on_message_delete(self, m):
if m.author.bot:
return
deleted = set()
for record in self.open_rifts.copy().values():
with suppress(KeyError, discord.NotFound):
rifted = record.pop(m)
if rifted not in deleted:
deleted.add(rifted)
await rifted.delete()
async def on_message_edit(self, b, a):
if a.author.bot:
return
channel = a.author if isinstance(a.channel, discord.DMChannel) else a.channel
sent = set()
for rift, record in self.open_rifts.copy().items():
if rift.source == channel and rift.author == a.author:
with suppress(KeyError, discord.NotFound):
await self.process_message(rift, a, record[a])
elif rift.destination == channel:
rift_chans = (rift.source, rift.destination)
if rift_chans not in sent:
sent.add(rift_chans)
with suppress(KeyError, discord.NotFound):
await self.process_message(rift, a, record[a])
| null |
cogs/CogManager/cogs/rift/rift.py
|
rift.py
|
py
| 15,090 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "redbot.core.utils.mod",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "redbot.core.checks.check_permissions",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "redbot.core.checks",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "redbot.core.commands",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "redbot.core.i18n.Translator",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "discord.DMChannel",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "redbot.core.utils.mod.is_admin_or_superior",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "redbot.core.utils.mod",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "redbot.core.Config.get_conf",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "redbot.core.Config",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "redbot.core.commands.group",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "redbot.core.commands",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "discord.TextChannel",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "discord.DMChannel",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "redbot.core.commands.BadArgument",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "redbot.core.commands",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "discord.DMChannel",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "redbot.core.commands.check",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "redbot.core.commands",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "redbot.core.commands.guild_only",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "redbot.core.commands",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "redbot.core.checks.admin_or_permissions",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "redbot.core.checks",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "discord.DMChannel",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "redbot.core.commands.check",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "redbot.core.commands",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "redbot.core.checks.is_owner",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "redbot.core.checks",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "converter.RiftConverter",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "discord.abc",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "redbot.core.utils.chat_formatting.humanize_list",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "converter.search_converter",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "converter.RiftConverter.search",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "converter.RiftConverter",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "redbot.core.utils.chat_formatting.pagify",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "discord.Guild",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "discord.User",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "discord.Permissions",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "discord.Permissions.all",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "discord.Permissions",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "discord.Message",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "discord.DMChannel",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "discord.User",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "redbot.core.utils.common_filters.filter_invites",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "redbot.core.utils.common_filters",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "redbot.core.utils.common_filters.filter_mass_mentions",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "redbot.core.utils.common_filters",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "asyncio.gather",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "discord.Attachment",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "io.BytesIO",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "discord.File",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "discord.File",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "discord.DMChannel",
"line_number": 329,
"usage_type": "attribute"
},
{
"api_name": "contextlib.suppress",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "discord.HTTPException",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "discord.HTTPException",
"line_number": 346,
"usage_type": "attribute"
},
{
"api_name": "contextlib.suppress",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "discord.NotFound",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "discord.DMChannel",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "contextlib.suppress",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "discord.NotFound",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "contextlib.suppress",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "discord.NotFound",
"line_number": 381,
"usage_type": "attribute"
}
] |
359906849
|
import pyglet
from game import star, bulk_load, player, meteorite
game_window = pyglet.window.Window(800, 600)
# pyglet batched drawing
main_batch = pyglet.graphics.Batch()
# Set up text labels; the batch object must be passed to each object's constructor (via the batch keyword)
level_label = pyglet.text.Label(text="It's a star Game ^。^!", x=400, y=575, anchor_x='center', batch=main_batch)
game_over_label = pyglet.text.Label(text="GAME OVER",
x=400, y=-300, anchor_x='center',
batch=main_batch, font_size=48)
score_label = pyglet.text.Label(text="Score: 0", x=10, y=575, batch=main_batch)
time_label = pyglet.text.Label(text="Time: ", x=150, y=575, batch=main_batch)
player_airship = None
player_lives = []
score = 0
num_stars = 3
num_meteorites = 1
game_objects = []
time_left = 1200
count = 0
level = 0
# size of the event stack
event_stack_size = 0
def init():
global score, num_stars, num_meteorites, time_left, count
score = 0
time_left = 1200
count = 0
score_label.text = "Score: " + str(score)
time_label.text = "Time: " + str((time_left-count)//120)
num_stars = 3
num_meteorites = 1
reset_level(2)
def reset_level(num_lives=2):
global player_airship, player_lives, game_objects, event_stack_size
# empty the event stack
while event_stack_size > 0:
game_window.pop_handlers()
event_stack_size -= 1
for life in player_lives:
life.delete()
# create the Player instance
player_airship = player.Player(x=400, y=300, batch=main_batch)
# load the small airship life icons
player_lives = bulk_load.player_lives(num_lives, main_batch)
# load multiple stars
stars = bulk_load.stars(num_stars,player_airship.position, main_batch)
# load multiple meteorites
meteorites = bulk_load.meteorites(num_meteorites, player_airship.position, main_batch)
# keep every object in one list so each object's update function can be called
game_objects = [player_airship] + stars + meteorites
'''
Tell pyglet which instances are event handlers
and push them onto the event stack with game_window.push_handlers().
'''
for obj in game_objects:
for handler in obj.event_handlers:
game_window.push_handlers(handler)
event_stack_size += 1
# window drawing: use the decorator on the function of the same name
@game_window.event
def on_draw():
# clear the screen before drawing
game_window.clear()
# draw
main_batch.draw()
def update(dt):
global score, num_stars, num_meteorites, count, time_left, level
player_dead = False
victory = False
'''
Check every pair of objects; two nested for loops cover all pairwise checks.
'''
for i in range(len(game_objects)):
for j in range(i + 1, len(game_objects)):
obj_1 = game_objects[i]
obj_2 = game_objects[j]
if not obj_1.dead and not obj_2.dead:
if obj_1.collides_with(obj_2):  # on collision
obj_1.handle_collision_with(obj_2)
obj_2.handle_collision_with(obj_1)
to_add = []
stars_remaining = 0  # track how many stars remain
meteorites_remaining = 0  # track how many meteorites remain
for obj in game_objects:
obj.update(dt)
to_add.extend(obj.new_objects)
obj.new_objects = []
if isinstance(obj, star.Star):
stars_remaining += 1
if isinstance(obj, meteorite.Meteorite):
meteorites_remaining += 1
if stars_remaining == 0 and meteorites_remaining == 0:
victory = True
# delete dead objects
for to_remove in [obj for obj in game_objects if obj.dead]:
if to_remove == player_airship:
player_dead = True
to_add.extend(to_remove.new_objects)
to_remove.delete()
game_objects.remove(to_remove)
if isinstance(to_remove, star.Star):
score += 1
score_label.text = "Score: " + str(score)
if isinstance(to_remove, meteorite.Meteorite):
score += 5
score_label.text = "Score: " + str(score)
if count < time_left:
count_now = (time_left - count)//120
time_label.text = "Time: " + str(count_now)
count += 1
game_objects.extend(to_add)
if player_dead or count >= time_left:
if len(player_lives) > 0 and count <= time_left:
if count == time_left:
player_airship.delete()
count = 0
reset_level(len(player_lives) - 1)
else:
game_over_label.y = 300
elif victory:
level += 1
num_stars += 1
num_meteorites += 1
player_airship.delete()
score += 10
count = 0
time_left += level*1440
reset_level(len(player_lives))
if __name__ == "__main__":
init()
# Set the refresh rate to 120 times per second; pyglet passes the elapsed time dt as the only argument to update
pyglet.clock.schedule_interval(update, 1 / 120.0)
pyglet.app.run()
| null |
python_game2/version/stars.py
|
stars.py
|
py
| 5,108 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyglet.window.Window",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pyglet.window",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pyglet.graphics.Batch",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pyglet.graphics",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pyglet.text.Label",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pyglet.text",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pyglet.text.Label",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyglet.text",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pyglet.text.Label",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyglet.text",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pyglet.text.Label",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pyglet.text",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "game.player.Player",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "game.player",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "game.bulk_load.player_lives",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "game.bulk_load",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "game.bulk_load.stars",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "game.bulk_load",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "game.bulk_load.meteorites",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "game.bulk_load",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "game.star.Star",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "game.star",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "game.meteorite.Meteorite",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "game.meteorite",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "game.star.Star",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "game.star",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "game.meteorite.Meteorite",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "game.meteorite",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "pyglet.clock.schedule_interval",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "pyglet.clock",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "pyglet.app.run",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "pyglet.app",
"line_number": 182,
"usage_type": "attribute"
}
] |
579106880
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import response, request
from listings.models import Listings
from listings.models import Realtor
from listings.choices import price_choices, bedroom_choices, state_choices
# Create your views here.
def index(request):
listings = Listings.objects.order_by('-list_date').filter(is_published=True)[:3]
context = {
'listings':listings,
'price_choices':price_choices,
'bedroom_choices':bedroom_choices,
'state_choices':state_choices
}
return render(request, 'pages/index.html',context)
def about(request):
realtors = Realtor.objects.order_by('-hire_date')
mvp_realtor = Realtor.objects.all().filter(is_mvp=True)
context = {
'realtors':realtors,
'mvps':mvp_realtor
}
return render(request, 'pages/about.html',context)
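# Minimal URLconf sketch (an assumption for illustration; urls.py is not part
# of this record):
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('about', views.about, name='about'),
#   ]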
| null |
pages/views.py
|
views.py
|
py
| 895 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "listings.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "listings.models.Listings.objects.order_by",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "listings.models.Listings.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "listings.models.Listings",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "listings.models",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "listings.choices.price_choices",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "listings.choices.bedroom_choices",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "listings.choices.state_choices",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.http.request",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "listings.models.Realtor.objects.order_by",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "listings.models.Realtor.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "listings.models.Realtor",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "listings.models.Realtor.objects.all",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "listings.models.Realtor.objects",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "listings.models.Realtor",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.http.request",
"line_number": 31,
"usage_type": "argument"
}
] |
91458234
|
import gym
from gym import error, spaces, utils
import numpy as np
import os
from .agent import Agent
import matplotlib.pyplot as plt
ACTIONS = {'MOVE_LEFT': [0, -1], # Move left
'MOVE_RIGHT': [0, 1], # Move right
'MOVE_UP': [-1, 0], # Move up
'MOVE_DOWN': [1, 0], # Move down
'STAY': [0, 0] # don't move
}
ACTIONS_ORDERED_LIST = ['MOVE_LEFT', 'MOVE_RIGHT', 'MOVE_UP', 'MOVE_DOWN', 'STAY']
ACTIONS_DELTA_ORDERED_LIST = [ACTIONS[action] for action in ACTIONS_ORDERED_LIST]
# bgr
DEFAULT_COLOURS = {' ': [0, 0, 0], # Black background
'S': [101, 67, 254], # stag
'H': [178, 196, 47], # hare1
'G': [178, 196, 47], # hare2
'Y': [216, 30, 54], # young
'M': [159, 67, 255], # mature
'C': [238, 133, 114], # overlap: stag on hare1
'D': [238, 133, 114], # overlap: stag on hare2
'E': [101, 67, 254], # escalation
# Colours for agents. R value is a unique identifier
'1': [166, 90, 3],
'2': [30, 191, 252], # Blue
'3': [204, 168, 0],
'4': [154, 157, 252]}
TRUN_MATURE = 0.3
TRUN_DEATH = 0.3
SYMMETRIES = [None, "ROT1", "ROT2", "ROT3", "REF1", "REF2", "REF3", "REF4"]
SYMMETRY_INDEX = {symmetry: SYMMETRIES.index(symmetry) for symmetry in SYMMETRIES}
SYMMETRY_MAPPING = [(0, 2), (2, 1), (1, 3), (3, 0), (2, 0), (1, 2), (3, 1), (0, 3)] # [0, -0, 1, -1]
def symmetry_mapping_to_matrix(mapping):
mat = np.zeros((3, 2))
for dim in range(2):
if mapping[dim] == 0:
mat[0, dim] = 1
elif mapping[dim] == 1:
mat[0, dim] = -1
mat[2, dim] = 1
elif mapping[dim] == 2:
mat[1, dim] = 1
else:
assert mapping[dim] == 3
mat[1, dim] = -1
mat[2, dim] = 1
return mat
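# Worked example: "ROT1" has mapping SYMMETRY_MAPPING[1] == (2, 1), so
# symmetry_mapping_to_matrix((2, 1)) builds
#   [[ 0. -1.]
#    [ 1.  0.]
#    [ 0.  1.]]
# and an augmented row vector [x, y, z] @ mat gives [y, z - x], the same
# (p[2], p[1]) that apply_symmetry computes for "ROT1".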
class GridWorldEnv(object):
def __init__(self, args, choose=0, length=5, np_random=None):
self.env_name = args.env_name
self.num_agents = args.num_agents
self.episode_length = args.episode_length
self.length = length
self.color_map = DEFAULT_COLOURS
self.share_reward = args.share_reward
self.shape_reward = args.shape_reward
self.shape_beta = args.shape_beta
self.symmetry_plan = args.symmetry_plan
self.symmetries = [None, None]
self.state_scaling = args.state_scaling
self.num_steps = 0
if np_random is None:
self.np_random = np.random.RandomState()
else:
self.np_random = np_random
if self.env_name == "StagHuntGW":
self.gore1_num = 0
self.gore2_num = 0
self.hare1_num = 0
self.hare2_num = 0
self.coop = args.coop if args.coop is not None else 5
self.defect = args.defect if args.defect is not None else 2
self.gore = args.gore if args.gore is not None else -2
# self.coop = 5
# self.defect = -2
# self.gore = 2
self.reward_randomization = args.reward_randomization
if self.reward_randomization:
# coop = [5,4,0,5,5,-5,-5,5]
# defect = [1,2,5,0,0,5,0,-5]
# gore = [-5,-2,0,5,0,-5,5,5]
coop = [5]
defect = [0]
gore = [0]
self.coop = coop[choose]
self.defect = defect[choose]
self.gore = gore[choose]
'''
coef = 2*self.np_random.rand(3)-1
self.coop = self.coop * coef[0]
self.defect = self.defect * coef[1]
self.gore = self.gore * coef[2]
'''
elif self.env_name == "HarvestGW":
self.coop = 2
self.defect = 1
self.reward_randomization = args.reward_randomization
if self.reward_randomization:
coop = [5, 10, 0, -10, 10]
defect = [1, -10, -5, 10, 1]
self.coop = coop[choose]
self.defect = defect[choose]
'''
coef = 2*self.np_random.rand(2)-1
self.coop = self.coop * coef[0]
self.defect = self.defect * coef[1]
'''
elif self.env_name == "EscalationGW":
self.coop = 1
self.coop_length = 0
self.defect_coef = args.defect_coef if args.defect_coef is not None else -0.9
self.reward_randomization = args.reward_randomization
self.stop_after_split = True if args.stop_after_split is None else args.stop_after_split
self.max_coop_steps = args.max_coop_steps
if self.reward_randomization:
coop = [1, 1, 0, 1, 1, 1]
defect_coef = [0, -2, 1, -0.5, 1, 5]
self.coop = coop[choose]
self.defect_coef = defect_coef[choose]
self.max_life = 20
self.coop_num = 0
self.realize_symmetry_plan()
self.reset_map()
self.setup_agents()
self.action_symmetry_mapping = []
self.generate_action_symmetry_mapping()
self.sym_trans_cache = dict()
self.all_sym_trans_cache = dict()
@property
def action_space(self):
action_space = []
for agent in self.agents:
action_space.append(agent.action_space)
return action_space
@property
def observation_space(self):
observation_space = []
for agent in self.agents:
observation_space.append(agent.observation_space)
return observation_space
@property
def is_stag_hunt(self):
return self.env_name == "StagHuntGW"
@property
def is_escalation(self):
return self.env_name == "EscalationGW"
@property
def is_harvest(self):
return self.env_name == "HarvestGW"
@property
def num_sym(self):
return 8
def generate_action_symmetry_mapping(self):
for symmetry in SYMMETRIES:
action_mapping = np.zeros(5)
for i in range(5):
delta = ACTIONS_DELTA_ORDERED_LIST[i]
sym_delta = self.apply_symmetry(symmetry, delta, True).tolist()
# print(ACTIONS_DELTA_ORDERED_LIST)
sym_i = ACTIONS_DELTA_ORDERED_LIST.index(sym_delta)
action_mapping[i] = sym_i
self.action_symmetry_mapping.append(action_mapping)
# print(symmetry, action_mapping)
def realize_symmetry_plan(self):
if self.symmetry_plan in SYMMETRIES:
self.symmetries = [self.symmetry_plan, self.symmetry_plan]
elif type(self.symmetry_plan) == tuple or type(self.symmetry_plan) == list:
self.symmetries = self.symmetry_plan
elif self.symmetry_plan == "other-play":
self.symmetries = [None, self.np_random.choice(SYMMETRIES[:self.num_sym])]
elif self.symmetry_plan is None:
self.symmetries = [None, None]
else:
raise NotImplementedError(self.symmetry_plan)
def apply_symmetry(self, symmetry, pos, is_delta=False, reverse=False):
z = 0 if is_delta else self.length - 1
p = [pos[0], z - pos[0], pos[1], z - pos[1]]
assert symmetry in SYMMETRIES
if reverse:
if symmetry == "ROT1":
symmetry = "ROT3"
elif symmetry == "ROT3":
symmetry = "ROT1"
mapping = SYMMETRY_MAPPING[SYMMETRIES.index(symmetry)]
return np.array([p[mapping[0]], p[mapping[1]]])
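# For instance (illustration, assuming a 5x5 board so z == 4):
# apply_symmetry("ROT1", [0, 0]) returns array([0, 4]), sending the top-left
# corner to the top-right; reverse=True undoes this by swapping ROT1 and ROT3.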
def get_obs_symmetry_trans_mat(self, symmetry, agent_id):
cache_id = "{}-{}".format(symmetry, agent_id)
if cache_id in self.sym_trans_cache:
trans = self.sym_trans_cache[cache_id]
else:
symmetry_info = self.get_obs_symmetry_info_agent(agent_id)
obs_length = 0
for s_type, s_num in symmetry_info:
if s_type == "none":
obs_length += s_num
else:
obs_length += 2 * s_num
trans = np.zeros((obs_length + 1, obs_length))
pos = 0
mapping = SYMMETRY_MAPPING[SYMMETRIES.index(symmetry)]
for s_type, s_num in symmetry_info:
for _ in range(s_num):
if s_type == "none":
trans[pos, pos] = 1.
pos += 1
else:
sym_trans = symmetry_mapping_to_matrix(mapping)
trans[pos: pos + 2, pos: pos + 2] = sym_trans[:2, :]
if s_type == "absolute":
trans[obs_length:, pos: pos + 2] = sym_trans[2:, :]
pos += 2
# print(pos)
self.sym_trans_cache[cache_id] = trans
return trans
def obs_to_symmetry_agent(self, obs, symmetry, agent_id):
z = 1. if self.state_scaling else self.length - 1
batch_size = obs.shape[0]
obs = np.concatenate((obs, np.ones((batch_size, 1)) * z), axis=1)
trans_mat = self.get_obs_symmetry_trans_mat(symmetry, agent_id)
return np.matmul(obs, trans_mat)
def obs_to_all_symmetries_agent(self, obs, agent_id):
new_obs = []
z = 1. if self.state_scaling else self.length - 1
batch_size = obs.shape[0]
obs = np.concatenate((obs, np.ones((batch_size, 1)) * z), axis=1)
for symmetry in SYMMETRIES[:self.num_sym]:
trans_mat = self.get_obs_symmetry_trans_mat(symmetry, agent_id)
new_obs.append(np.matmul(obs, trans_mat))
return new_obs
def actions_to_symmetry_agent(self, actions, symmetry, agent_id):
return self.action_symmetry_mapping[SYMMETRY_INDEX[symmetry]][actions]
def actions_to_all_symmetries_agent(self, actions, agent_id):
new_actions = []
for symmetry in SYMMETRIES[:self.num_sym]:
new_actions.append(self.actions_to_symmetry_agent(actions, symmetry, agent_id))
return new_actions
def StagHunt_setup_map(self):
self.agents_start_pos = []
self.stag_points = 0
self.hare1_points = 0
self.hare2_points = 0
points = []
num_index = 0
while num_index < (self.num_agents + 3):
index = self.np_random.randint(0, self.length, (2,)).tolist()
if index in points:
continue
else:
points.append(index)
num_index += 1
for i in range(self.num_agents):
self.agents_start_pos.append(points[i])
self.base_map[points[-3][0], points[-3][1]] = 'S'
self.stag_points += 1
self.stag_pos = np.array(points[-3])
self.base_map[points[-2][0], points[-2][1]] = 'H'
self.hare1_points += 1
self.hare1_pos = np.array(points[-2])
self.base_map[points[-1][0], points[-1][1]] = 'G'
self.hare2_points += 1
self.hare2_pos = np.array(points[-1])
def Harvest_setup_map(self):
self.agents_start_pos = []
self.life = np.zeros((self.length, self.length))
self.young_points = 0
self.young_pos = []
self.mature_points = 0
self.mature_pos = []
points = []
num_index = 0
while num_index < (self.num_agents + 1):
index = self.np_random.randint(0, self.length, (2)).tolist()
if index in points:
continue
else:
points.append(index)
num_index += 1
for i in range(self.num_agents):
self.agents_start_pos.append(points[i])
self.base_map[points[-1][0], points[-1][1]] = 'Y'
self.young_points += 1
self.young_pos.append(points[-1])
self.life[points[-1][0], points[-1][1]] += 1
def Escalation_setup_map(self):
self.escalation_over = False
self.agents_start_pos = []
points = []
num_index = 0
while num_index < (self.num_agents + 1):
index = self.np_random.randint(0, self.length, (2,)).tolist()
if index in points:
continue
else:
points.append(index)
num_index += 1
for i in range(self.num_agents):
self.agents_start_pos.append(points[i])
self.base_map[points[-1][0], points[-1][1]] = 'E'
self.escalation_points = 1
self.escalation_pos = np.array(points[-1])
def setup_agents(self):
self.coop_num = 0
self.gore1_num = 0
self.gore2_num = 0
self.hare1_num = 0
self.hare2_num = 0
self.coop_length = 0
self.agents = []
for i in range(self.num_agents):
agent = Agent(i, self.agents_start_pos[i], self.base_map, self.env_name)
self.agents.append(agent)
for i in range(self.num_agents):
self.agents[i].observation_space = self.get_obs_agent(i).shape
# print(i, self.agents[i].observation_space)
def map_to_colors(self, base_map=None, color_map=None):
"""Converts a map to an array of RGB values.
Parameters
----------
base_map: np.ndarray
map to convert to colors
color_map: dict
mapping between array elements and desired colors
Returns
-------
arr: np.ndarray
3-dim numpy array consisting of color map
"""
if base_map is None:
base_map = self.get_map_with_agents()
if color_map is None:
color_map = self.color_map
rgb_arr = np.zeros((base_map.shape[0], base_map.shape[1], 3), dtype=int)
for row_elem in range(base_map.shape[0]):
for col_elem in range(base_map.shape[1]):
rgb_arr[row_elem, col_elem, :] = color_map[base_map[row_elem, col_elem]]
return rgb_arr
def get_obs_agent(self, agent_id):
"""Returns observation for agent_id.
NOTE: Agents should have access only to their local observations
during decentralised execution.
"""
symmetry = self.symmetries[agent_id]
if self.env_name == 'HarvestGW':
assert symmetry is None
rgb_arr = self.map_to_colors(self.get_map_with_agents(), self.color_map)
return rgb_arr.transpose(2, 0, 1)
elif self.env_name == 'StagHuntGW':
# my pos
# print(agent_id, len(self.agents))
my_pos = self.agents[agent_id].pos
# other pos
other_pos = self.apply_symmetry(symmetry, self.agents[1 - agent_id].pos - my_pos, True)
# stag_pos
stag_pos = self.apply_symmetry(symmetry, self.stag_pos - my_pos, True)
# plant_pos
hare1_pos = self.apply_symmetry(symmetry, self.hare1_pos - my_pos, True)
hare2_pos = self.apply_symmetry(symmetry, self.hare2_pos - my_pos, True)
poses = np.concatenate([self.apply_symmetry(symmetry, my_pos), other_pos, stag_pos, hare1_pos, hare2_pos])
if self.state_scaling:
poses = poses / (self.length - 1)
step_info = self.num_steps / self.episode_length
return np.concatenate([[step_info], poses])
elif self.env_name == 'EscalationGW':
# my pos
my_pos = self.agents[agent_id].pos
# other pos
other_pos = self.apply_symmetry(symmetry, self.agents[1 - agent_id].pos - my_pos, True)
# escalation pos
escalation_pos = self.apply_symmetry(symmetry, self.escalation_pos - my_pos, True)
# return np.concatenate([my_pos]+[other_pos]+[escalation_pos]+[[self.coop_length]])
step_info = self.num_steps / self.episode_length
over_info = self.escalation_over
coop_length = self.coop_length
poses = np.concatenate([self.apply_symmetry(symmetry, my_pos), other_pos, escalation_pos])
if self.state_scaling:
poses = poses / (self.length - 1)  # avoid in-place '/' on an int array; matches the StagHunt branch
return np.concatenate([[step_info, over_info, coop_length], poses])
def get_obs_symmetry_info_agent(self, agent_id):
symmetry_info = list()
if self.is_stag_hunt:
symmetry_info.append(('none', 1))
symmetry_info.append(('absolute', 1))
symmetry_info.append(('relative', 4))
elif self.is_escalation:
symmetry_info.append(('none', 3))
symmetry_info.append(('absolute', 1))
symmetry_info.append(('relative', 2))
else:
raise NotImplementedError
return symmetry_info
def get_input_structure_agent(self, agent_id):
input_structure = list()
dim_p = 2
if self.env_name == 'StagHuntGW':
input_structure.append(('self', 1 + dim_p))
input_structure.append(('other', dim_p))
input_structure.append(('stag', dim_p))
input_structure.append(('plant', dim_p))
input_structure.append(('plant', dim_p))
elif self.env_name == 'EscalationGW':
input_structure.append(('self', 3 + dim_p))
input_structure.append(('other', dim_p))
input_structure.append(('escalation', dim_p))
else:
raise NotImplementedError
return input_structure
def reset_map(self):
"""Resets the map to be empty as well as a custom reset set by subclasses"""
self.base_map = np.full((self.length, self.length), ' ')
if self.env_name == "StagHuntGW":
self.StagHunt_setup_map()
elif self.env_name == "HarvestGW":
self.Harvest_setup_map()
elif self.env_name == "EscalationGW":
self.Escalation_setup_map()
def get_map_with_agents(self):
"""Gets a version of the environment map where generic
'P' characters have been replaced with specific agent IDs.
Returns:
2D array of strings representing the map.
"""
map_with_agents = np.copy(self.base_map)
for i in range(self.num_agents):
char_id = str(i + 1) # agent-i
if map_with_agents[self.agents[i].pos[0], self.agents[i].pos[1]] == ' ':
map_with_agents[self.agents[i].pos[0], self.agents[i].pos[1]] = char_id
elif map_with_agents[self.agents[i].pos[0], self.agents[i].pos[1]] == 'E':
map_with_agents[self.agents[i].pos[0], self.agents[i].pos[1]] = '4'
else:
map_with_agents[self.agents[i].pos[0], self.agents[i].pos[1]] = '3'
return map_with_agents
def update_moves(self, agent_actions):
for agent_id, action in agent_actions.items():
symmetry = self.symmetries[agent_id]
agent = self.agents[agent_id]
selected_action = ACTIONS[action]
# if self.is_escalation and self.escalation_over and selected_action == 'STAY':
# self.agents[agent_id].reward_this_turn += 0.1
new_pos = agent.get_pos() + self.apply_symmetry(symmetry, selected_action, True, True)
# allow the agents to confirm what position they can move to
agent.update_agent_pos(new_pos)
def update_stag(self):
if self.stag_points > 0:
minimum = -1
minimum_indices = []
for i in range(self.num_agents):
x = self.agents[i].get_pos() - self.stag_pos
dist = np.sum(np.square(x))
if dist < minimum or minimum == -1:
minimum = dist
minimum_indices = [i]
elif dist == minimum:
minimum_indices.append(i)
minimum_index = self.np_random.choice(minimum_indices)
# move
x = self.agents[minimum_index].get_pos() - self.stag_pos
sign_x = np.sign(x)
if 0 in sign_x:
if sign_x[0] == 0 and sign_x[1] == 0:
pass
else:
temp_pos = self.stag_pos + sign_x
if self.base_map[temp_pos[0], temp_pos[1]] == ' ':
if self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'C':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'H'
elif self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'D':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'G'
else:
self.base_map[self.stag_pos[0], self.stag_pos[1]] = ' '
self.stag_pos = temp_pos
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'S'
else:
if self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'C':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'H'
elif self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'D':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'G'
else:
self.base_map[self.stag_pos[0], self.stag_pos[1]] = ' '
self.stag_pos = temp_pos
if self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'H':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'C'
elif self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'G':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'D'
else:
temp_sign = np.copy(sign_x)
choose = self.np_random.randint(0, 2)
temp_sign[choose] = 0
temp_pos = self.stag_pos + temp_sign
if self.base_map[temp_pos[0], temp_pos[1]] == 'H' or self.base_map[temp_pos[0], temp_pos[1]] == 'G':
temp_sign1 = np.copy(sign_x)
temp_sign1[1 - choose] = 0
temp_pos1 = self.stag_pos + temp_sign1
if self.base_map[temp_pos1[0], temp_pos1[1]] == ' ':
if self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'C':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'H'
elif self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'D':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'G'
else:
self.base_map[self.stag_pos[0], self.stag_pos[1]] = ' '
self.stag_pos = temp_pos1
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'S'
else:
if self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'C':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'H'
elif self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'D':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'G'
else:
self.base_map[self.stag_pos[0], self.stag_pos[1]] = ' '
self.stag_pos = temp_pos1
if self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'H':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'C'
elif self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'G':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'D'
else:
if self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'C':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'H'
elif self.base_map[self.stag_pos[0], self.stag_pos[1]] == 'D':
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'G'
else:
self.base_map[self.stag_pos[0], self.stag_pos[1]] = ' '
self.stag_pos = temp_pos
self.base_map[self.stag_pos[0], self.stag_pos[1]] = 'S'
def StagHuntUpdateMap(self):
while self.stag_points < 1:
index = self.np_random.randint(0, self.length, (2)).tolist()
map_with_agents = self.get_map_with_agents()
if map_with_agents[index[0], index[1]] == ' ':
self.base_map[index[0], index[1]] = 'S'
self.stag_points += 1
self.stag_pos = np.array(index)
while self.hare1_points < 1:
index = self.np_random.randint(0, self.length, (2)).tolist()
if self.get_map_with_agents()[index[0], index[1]] == ' ':
self.base_map[index[0], index[1]] = 'H'
self.hare1_points += 1
self.hare1_pos = np.array(index)
while self.hare2_points < 1:
index = self.np_random.randint(0, self.length, (2)).tolist()
if self.get_map_with_agents()[index[0], index[1]] == ' ':
self.base_map[index[0], index[1]] = 'G'
self.hare2_points += 1
self.hare2_pos = np.array(index)
def EscalationUpdateMap(self):
actions = [[0, 1], [0, -1], [-1, 0], [1, 0]]
last_pos = self.escalation_pos
while self.escalation_points == 0:
next_choose = self.np_random.randint(0, 4)
next_pos = last_pos + actions[next_choose]
next_row, next_col = next_pos
if next_row < 0 or next_row >= self.length or next_col < 0 or next_col >= self.length:
self.escalation_points = 0
else:
self.base_map[last_pos[0], last_pos[1]] = ' '
self.escalation_points = 1
self.escalation_pos = next_pos
self.base_map[next_pos[0], next_pos[1]] = 'E'
def HarvestUpdateMap(self):
for i in range(self.life.shape[0]):
for j in range(self.life.shape[1]):
if self.base_map[i, j] == 'Y' or self.base_map[i, j] == 'M':
self.life[i][j] += 1
# mature to death
old_mature_pos = np.copy(self.mature_pos)
for i, mature in enumerate(old_mature_pos):
if self.life[mature[0], mature[1]] >= self.max_life or self.np_random.rand(1)[0] <= TRUN_DEATH:
self.mature_points -= 1
self.mature_pos.remove(mature.tolist())
self.base_map[mature[0], mature[1]] = ' '
self.life[mature[0], mature[1]] = 0
# young to mature
old_young_pos = np.copy(self.young_pos)
for i, young in enumerate(old_young_pos):
if self.life[young[0], young[1]] >= (self.max_life - 1) or self.np_random.rand(1)[0] <= TRUN_MATURE:
self.base_map[young[0], young[1]] = 'M'
self.mature_points += 1
self.mature_pos.append(young.tolist())
self.young_points -= 1
self.young_pos.remove(young.tolist())
num_plants_old = self.young_points + self.mature_points
map_with_agents = self.get_map_with_agents()
# young born
if num_plants_old < 4:
while (self.young_points + self.mature_points) - num_plants_old < 1:
index = self.np_random.randint(0, self.length, (2)).tolist()
if map_with_agents[index[0], index[1]] == ' ':
self.base_map[index[0], index[1]] = 'Y'
self.young_points += 1
self.young_pos.append(index)
self.life[index[0], index[1]] += 1
def HarvestConsume(self, pos0, pos1):
"""Defines how an agent interacts with the char it is standing on"""
charA = self.base_map[pos0[0], pos0[1]]
charB = self.base_map[pos1[0], pos1[1]]
if pos0 == pos1:
if charA == 'M':
self.coop_num += 1
self.agents[0].reward_this_turn += self.coop
self.agents[1].reward_this_turn += self.coop
self.mature_points -= 1
self.mature_pos.remove(pos0)
self.base_map[pos0[0], pos0[1]] = ' '
self.life[pos0[0], pos0[1]] = 0
elif charA == 'Y':
index = self.np_random.randint(0, 2)
self.agents[index].reward_this_turn += self.defect
self.young_points -= 1
self.young_pos.remove(pos0)
self.base_map[pos0[0], pos0[1]] = ' '
self.life[pos0[0], pos0[1]] = 0
else:
if charA == 'M':
self.coop_num += 1
self.agents[0].reward_this_turn += self.coop
self.agents[1].reward_this_turn += self.coop
self.mature_points -= 1
self.mature_pos.remove(pos0)
self.base_map[pos0[0], pos0[1]] = ' '
self.life[pos0[0], pos0[1]] = 0
if charB == 'M':
self.agents[0].reward_this_turn += self.coop
self.agents[1].reward_this_turn += self.coop
self.mature_points -= 1
self.mature_pos.remove(pos1)
self.base_map[pos1[0], pos1[1]] = ' '
self.life[pos1[0], pos1[1]] = 0
elif charB == 'Y':
self.agents[1].reward_this_turn += self.defect
self.young_points -= 1
self.young_pos.remove(pos1)
self.base_map[pos1[0], pos1[1]] = ' '
self.life[pos1[0], pos1[1]] = 0
elif charA == 'Y':
self.agents[0].reward_this_turn += self.defect
self.young_points -= 1
self.young_pos.remove(pos0)
self.base_map[pos0[0], pos0[1]] = ' '
self.life[pos0[0], pos0[1]] = 0
if charB == 'M':
self.coop_num += 1
self.agents[0].reward_this_turn += self.coop
self.agents[1].reward_this_turn += self.coop
self.mature_points -= 1
self.mature_pos.remove(pos1)
self.base_map[pos1[0], pos1[1]] = ' '
self.life[pos1[0], pos1[1]] = 0
elif charB == 'Y':
self.agents[1].reward_this_turn += self.defect
self.young_points -= 1
self.young_pos.remove(pos1)
self.base_map[pos1[0], pos1[1]] = ' '
self.life[pos1[0], pos1[1]] = 0
elif charA == ' ':
if charB == 'M':
self.coop_num += 1
self.agents[0].reward_this_turn += self.coop
self.agents[1].reward_this_turn += self.coop
self.mature_points -= 1
self.mature_pos.remove(pos1)
self.base_map[pos1[0], pos1[1]] = ' '
self.life[pos1[0], pos1[1]] = 0
elif charB == 'Y':
self.agents[1].reward_this_turn += self.defect
self.young_points -= 1
self.young_pos.remove(pos1)
self.base_map[pos1[0], pos1[1]] = ' '
self.life[pos1[0], pos1[1]] = 0
def EscalationConsume(self, pos0, pos1):
if self.escalation_over:
return
# self.agents[0].reward_this_turn -= 0.1
# self.agents[1].reward_this_turn -= 0.1
charA = self.base_map[pos0[0], pos0[1]]
charB = self.base_map[pos1[0], pos1[1]]
if charA == 'E':
if charB == 'E':
self.agents[0].reward_this_turn += self.coop
self.agents[1].reward_this_turn += self.coop
self.coop_length += 1
self.coop_num += 1
self.escalation_points = 0
if self.max_coop_steps is not None and self.coop_length == self.max_coop_steps:
self.escalation_over = True
else:
if self.coop_length > 0:
self.agents[0].reward_this_turn = self.defect_coef * self.coop_length
# self.agents[1].reward_this_turn += 2
# self.agents[0].done = True
# self.agents[1].done = True
self.escalation_over = True
else:
if charB == 'E' and self.coop_length > 0:
self.agents[1].reward_this_turn = self.defect_coef * self.coop_length
# self.agents[0].reward_this_turn += 2
# self.agents[0].done = True
# self.agents[1].done = True
self.escalation_over = True
elif self.coop_length > 0:
# if self.coop_length == 1:
# p0 = np.array(pos0)
# p1 = np.array(pos1)
# if all(p0 == p1) and np.abs(p0 - self.escalation_pos).sum() == 1:
# self.agents[0].reward_this_turn -= 5
# self.agents[1].reward_this_turn -= 5
# if pos0 != pos1:
# self.agents[0].reward_this_turn += 2
# self.agents[1].reward_this_turn += 2
if self.stop_after_split:
self.escalation_over = True
else:
self.coop_length = 0
last_pos = self.escalation_pos
self.base_map[last_pos[0]][last_pos[1]] = ' '
new_pos = self.np_random.randint(self.length, size=(2,))
self.escalation_points = 1
self.escalation_pos = new_pos
self.base_map[new_pos[0]][new_pos[1]] = 'E'
# if self.coop_length == 0:
# self.agents[0].reward_this_turn -= 0.1
# self.agents[1].reward_this_turn -= 0.1
def StagHuntConsume(self, pos0, pos1):
"""Defines how an agent interacts with the char it is standing on"""
charA = self.base_map[pos0[0], pos0[1]]
charB = self.base_map[pos1[0], pos1[1]]
if charA == 'S':
if charB == 'S':
self.coop_num += 1
self.agents[0].reward_this_turn += self.coop
self.agents[1].reward_this_turn += self.coop
self.stag_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
elif charB == 'H':
self.gore1_num += 1
self.agents[0].reward_this_turn += self.gore
self.stag_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
self.agents[1].reward_this_turn += self.defect
self.hare1_points -= 1
self.hare2_num += 1
self.base_map[pos1[0], pos1[1]] = ' '
elif charB == 'G':
self.gore1_num += 1
self.agents[0].reward_this_turn += self.gore
self.stag_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
self.agents[1].reward_this_turn += self.defect
self.hare2_points -= 1
self.hare2_num += 1
self.base_map[pos1[0], pos1[1]] = ' '
else:
self.gore1_num += 1
self.agents[0].reward_this_turn += self.gore
self.stag_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
elif charA == 'H':
if charB == 'S':
self.gore2_num += 1
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.hare1_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
self.agents[1].reward_this_turn += self.gore
self.stag_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
elif charB == 'D':
self.gore2_num += 1
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.hare1_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
self.agents[1].reward_this_turn += self.gore
self.agents[1].reward_this_turn += self.defect
self.hare2_num += 1
self.stag_points -= 1
self.hare2_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
elif charB == 'G':
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.hare1_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
self.agents[1].reward_this_turn += self.defect
self.hare2_num += 1
self.hare2_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
elif charB == 'H':
self.hare1_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
index = self.np_random.randint(0, 2)
self.agents[index].reward_this_turn += self.defect
if index == 0:
self.hare1_num += 1
else:
self.hare2_num += 1
else:
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.hare1_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
elif charA == 'G':
if charB == 'S':
self.gore2_num += 1
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.hare2_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
self.agents[1].reward_this_turn += self.gore
self.stag_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
elif charB == 'C':
self.gore2_num += 1
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.hare2_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
self.agents[1].reward_this_turn += self.gore
self.agents[1].reward_this_turn += self.defect
self.hare2_num += 1
self.stag_points -= 1
self.hare1_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
elif charB == 'H':
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.hare2_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
self.agents[1].reward_this_turn += self.defect
self.hare2_num += 1
self.hare1_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
elif charB == 'G':
self.hare2_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
index = self.np_random.randint(0, 2)
self.agents[index].reward_this_turn += self.defect
if index == 0:
self.hare1_num += 1
else:
self.hare2_num += 1
else:
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.hare2_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
elif charA == 'C':
if charB == 'C':
self.coop_num += 1
self.agents[0].reward_this_turn += self.coop
self.agents[1].reward_this_turn += self.coop
index = self.np_random.randint(0, 2)
self.agents[index].reward_this_turn += self.defect
self.hare2_num += 1
self.stag_points -= 1
self.hare1_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
elif charB == 'G':
self.gore1_num += 1
self.agents[0].reward_this_turn += self.gore
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.stag_points -= 1
self.hare1_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
self.agents[1].reward_this_turn += self.defect
self.hare2_num += 1
self.hare2_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
else:
self.gore1_num += 1
self.agents[0].reward_this_turn += self.gore
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.stag_points -= 1
self.hare1_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
elif charA == 'D':
if charB == 'D':
self.coop_num += 1
self.agents[0].reward_this_turn += self.coop
self.agents[1].reward_this_turn += self.coop
index = self.np_random.randint(0, 2)
self.agents[index].reward_this_turn += self.defect
if index == 0:
self.hare1_num += 1
else:
self.hare2_num += 1
self.stag_points -= 1
self.hare2_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
elif charB == 'H':
self.gore1_num += 1
self.agents[0].reward_this_turn += self.gore
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.stag_points -= 1
self.hare2_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
self.agents[1].reward_this_turn += self.defect
self.hare2_num += 1
self.hare1_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
else:
self.gore1_num += 1
self.agents[0].reward_this_turn += self.gore
self.agents[0].reward_this_turn += self.defect
self.hare1_num += 1
self.stag_points -= 1
self.hare2_points -= 1
self.base_map[pos0[0], pos0[1]] = ' '
elif charA == ' ':
if charB == 'S':
self.gore2_num += 1
self.agents[1].reward_this_turn += self.gore
self.stag_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
elif charB == 'H':
self.agents[1].reward_this_turn += self.defect
self.hare2_num += 1
self.hare1_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
elif charB == 'G':
self.agents[1].reward_this_turn += self.defect
self.hare2_num += 1
self.hare2_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
elif charB == 'C':
self.gore2_num += 1
self.agents[1].reward_this_turn += self.gore
self.agents[1].reward_this_turn += self.defect
self.hare2_num += 1
self.stag_points -= 1
self.hare1_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
elif charB == 'D':
self.gore2_num += 1
self.agents[1].reward_this_turn += self.gore
self.agents[1].reward_this_turn += self.defect
self.hare2_num += 1
self.stag_points -= 1
self.hare2_points -= 1
self.base_map[pos1[0], pos1[1]] = ' '
def close(self):
self.agents = []
return None
def seed(self, seed):
if seed is None:
self.np_random.seed(1)
else:
self.np_random.seed(seed)
def text_render(self):
map_with_agents = self.get_map_with_agents()
print(map_with_agents)
    def render(self, filename=None):
        """ Creates an image of the map to plot or save.
        Args:
            filename: If a string is passed, the image is saved
                to disk at this location.
        """
map_with_agents = self.get_map_with_agents()
rgb_arr = self.map_to_colors(map_with_agents)
plt.figure()
plt.imshow(rgb_arr, interpolation='nearest')
if 'StagHunt' in self.env_name:
text = "#Coop.-Hunt = " + str(self.coop_num) + "/" + str(self.episode_length)
plt.text(0, 0, text, fontdict={'size': 10, 'color': 'white'})
text = "#Single-Hunt = " + str(self.gore1_num + self.gore2_num) + "/" + str(self.episode_length)
plt.text(0, 0.2, text, fontdict={'size': 10, 'color': 'white'})
text = "#Apple = " + str(self.hare1_num + self.hare2_num) + "/" + str(self.episode_length)
plt.text(0, 0.4, text, fontdict={'size': 10, 'color': 'white'})
# text = "agent2_gore_num = " + str(self.gore2_num) + "/" + str(self.episode_length)
# plt.text(0, 0.6, text, fontdict={'size': 10, 'color': 'white'})
# text = "agent2_hare_num = " + str(self.hare2_num) + "/" + str(self.episode_length)
# plt.text(0, 0.8, text, fontdict={'size': 10, 'color': 'white'})
plt.title("Monster-Hunt")
elif 'Escalation' in self.env_name:
text = "#Coop. Length L = " + str(self.coop_num) + "/" + str(self.episode_length)
plt.text(0, 0, text, fontdict={'size': 10, 'color': 'white'})
plt.title("Escalation")
if filename is not None:
plt.savefig(filename)
return rgb_arr.astype(np.uint8)
    def step(self, actions):  # actions: one discrete action index per agent
"""A single environment step. Returns reward, terminated, info."""
self.num_steps += 1
# actions = [np.argmax(a) for a in actions]
agent_actions = {}
for i in range(self.num_agents):
agent_action = self.agents[i].action_map(actions[i])
agent_actions[i] = agent_action
if self.env_name == 'StagHuntGW':
self.update_stag()
# move
self.update_moves(agent_actions)
pos0 = self.agents[0].get_pos().tolist()
pos1 = self.agents[1].get_pos().tolist()
if self.env_name == 'StagHuntGW':
self.StagHuntConsume(pos0, pos1)
self.StagHuntUpdateMap()
elif self.env_name == 'HarvestGW':
self.HarvestConsume(pos0, pos1)
self.HarvestUpdateMap()
elif self.env_name == 'EscalationGW':
self.EscalationConsume(pos0, pos1)
self.EscalationUpdateMap()
observations = []
rewards = []
dones = []
infos = {0: {}, 1: {}, "global": {'collective_return': [], 'coop&coop_num': [], 'gore1_num': [],
'gore2_num': [], 'hare1_num': [], 'hare2_num': []}}
for i in range(self.num_agents):
observations.append(self.get_obs_agent(i))
reward = self.agents[i].compute_reward() * 0.1
rewards.append(reward)
dones.append(self.agents[i].get_done())
collective_return = 0
for i in range(self.num_agents):
collective_return += self.agents[i].collective_return
infos['collective_return'] = collective_return
infos['coop&coop_num'] = self.coop_num
if self.env_name == 'StagHuntGW':
infos['gore1_num'] = self.gore1_num
infos['gore2_num'] = self.gore2_num
infos['hare1_num'] = self.hare1_num
infos['hare2_num'] = self.hare2_num
global_reward = np.sum(rewards)
if self.share_reward:
rewards = [global_reward] * self.num_agents
if self.shape_reward:
rewards = list(map(lambda x: x[0] * self.shape_beta + x[1] * (1 - self.shape_beta),
zip([global_reward] * self.num_agents, rewards)))
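        # e.g. with shape_beta = 0.8, global_reward = 1.0 and an individual
        # reward of 0.5, the shaped reward for that agent is
        # 0.8 * 1.0 + 0.2 * 0.5 = 0.9.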
return observations, rewards, dones, infos
def reset(self):
"""Reset the environment. Required after each full episode.
Returns initial observations and states.
"""
self.realize_symmetry_plan()
self.reset_map()
self.setup_agents()
self.num_steps = 0
observations = []
for i in range(self.num_agents):
observations.append(self.get_obs_agent(i))
return observations
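# Usage sketch (hypothetical: the class name, constructor arguments and action
# encoding are assumptions based only on the step()/reset() signatures above):
#     env = GridWorldEnv(...)                     # hypothetical constructor
#     obs = env.reset()
#     for _ in range(100):
#         obs, rewards, dones, infos = env.step([0, 0])  # one action per agent
#         if all(dones):
#             break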
| null |
pettingzoo/mappo_ssd/utils/grid_world.py
|
grid_world.py
|
py
| 48,912 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.zeros",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.random.RandomState",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "agent.action_space",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "agent.observation_space",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "numpy.matmul",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "numpy.matmul",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "agent.Agent",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "agent.get_pos",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "agent.update_agent_pos",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 545,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 550,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 597,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 603,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 636,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 1037,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1037,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 1038,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1038,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 1042,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1042,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 1044,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1044,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 1046,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1046,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 1051,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1051,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 1054,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1054,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 1055,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1055,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 1058,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1058,
"usage_type": "name"
},
{
"api_name": "numpy.uint8",
"line_number": 1060,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 1113,
"usage_type": "call"
}
] |
508639267
|
from osgeo import gdal, gdalnumeric, ogr, gdal_array
import sys
sys.path.append(r'C:\Program Files\GDAL')
# import ogr
# import gdal_array
from skimage import measure
import numpy as np
import json
import os
import datetime
from shapely.geometry import Polygon
# collect all file paths in the directory, including sub-directories
def getfilepath(curDir, filelist, ext=('.TIF', '.tif', '.PNG', '.png', '.JPG', '.jpg')):
if os.path.isfile(curDir):
if curDir.lower().endswith(ext):
filelist.append(curDir)
else:
dir_or_files = os.listdir(curDir)
for dir_file in dir_or_files:
dir_file_path = os.path.join(curDir, dir_file)
# check is file or directory
if os.path.isdir(dir_file_path):
getfilepath(dir_file_path, filelist, ext)
else:
# extension_ = dir_file_path.split('.')[-1]
# if (extension_.lower() in ext):
if dir_file_path.endswith(ext):
filelist.append(dir_file_path)
#
def raster2array(rasters,band_no=1):
    """
    Arguments:
        rasters     a gdal raster (Dataset) object
        band_no     band index, 1-based; any other value reads all bands
    Example :
        raster = gdal.Open(rasterfn)
        raster2array(raster,1)
    """
bands = rasters.RasterCount
if band_no>0 and band_no <=bands:
band = rasters.GetRasterBand(band_no)
array = band.ReadAsArray()
else:
array = rasters.ReadAsArray()
return array
# This function will convert the rasterized clipper shapefile
# to a mask for use within GDAL.
def imageToArray(i):
"""
Converts a Python Imaging Library array to a
gdalnumeric image.
"""
a=gdalnumeric.fromstring(i.tobytes(),'b')
a.shape=i.im.size[1], i.im.size[0]
return a
#
def coord2pixelOffset(geotransform, x, y):
"""
Arguments:
geotransform A gdal transform object
x world coordinate x
y world coordinate y
return pixel position in image
Example :
raster = gdal.Open(rasterfn)
geotransform = raster.GetGeoTransform()
        coord2pixelOffset(geotransform,xCoord,yCoord)
"""
#left top
originX = geotransform[0]
originY = geotransform[3]
#pixel resolution
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
    # axis rotation terms (not used here)
rotateX = geotransform[2]
rotateY = geotransform[4]
xOffset = int((x - originX) / pixelWidth)
yOffset = int((y - originY) / pixelHeight)
return xOffset, yOffset
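# Worked example (hypothetical geotransform): with origin (100.0, 500.0), a 10 m
# pixel width and pixelHeight = -10.0 (typical for north-up rasters), the world
# point (130.0, 470.0) maps to xOffset = (130 - 100) / 10 = 3 and
# yOffset = (470 - 500) / -10 = 3.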
#
def getUniqueValue(inL, column):
    # initialize an empty list
unique_list = []
if (len(inL) == 0): return unique_list
count = len(inL[0])
if column > count: return unique_list
# traverse for all elements
for x in inL:
# check if exists in unique_list or not
if x[column - 1] not in unique_list:
unique_list.append(x[column - 1])
return unique_list
def pixeloffset2coord(geoTransform,pixel_xOffset,pixel_yOffset):
"""
geoTransform: a gdal geoTransform object
pixel_xOffset:
pixel_yOffset:
return: coords
"""
#left top
originX = geoTransform[0]
originY = geoTransform[3]
#pixel resolution
pixelWidth = geoTransform[1]
pixelHeight = geoTransform[5]
# calculate coordinates
coordX = originX+ pixelWidth*pixel_xOffset
coordY = originY+pixelHeight*pixel_yOffset
return coordX,coordY
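# Inverse of coord2pixelOffset above: with the same hypothetical geotransform,
# pixel offset (3, 3) maps back to coordX = 100 + 10 * 3 = 130.0 and
# coordY = 500 + (-10) * 3 = 470.0.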
INFO = {
"description": "sidewalk test Dataset",
"url": "",
"version": "0.1.0",
"year": 2019,
"contributor": "czh_njit",
"date_created": datetime.datetime.utcnow().isoformat(' ')
}
LICENSES = [
{
"id": 1,
"name": "Attribution-NonCommercial-ShareAlike License",
"url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
}
]
CATEGORIES = [
{
'id': 1,
'name': 'sidewalk',
'supercategory': '',
},
# {
# 'id': 2,
# 'name': 'background',
# 'supercategory': '',
# },
]
class czhTiff2Json():
"""
create annotation file from tiff file,used for satellite image or aerial image object detection/instance segmentation
"""
def __init__(self,tifFns,lbltiffPath,jsonFn,shpFn="",mode=0):
"""
geotiff?
Arguments:
tifFn: tif file name or path (in)
jsonFn:json file name (out)
shpFn: shapefile name (in)
mode: two ways to get json file,one get from tiff file ,another from shape file
"""
self.tifFns = tifFns
self.lbltiffPath = lbltiffPath
self.jsonFn = jsonFn
self.mode = mode
if mode ==1 :
self.shpFn = shpFn
def createJson(self):
'''
Creates the COCO-formatted annotation files
'''
if self.mode ==1:
self.createJsonFromShape()
else:
self.createJsonFromTiffs()
def createJsonFromShape(self):
pass
def createJsonFromTiffs(self):
        # self.tifFns may be a single tiff file or a directory; getfilepath()
        # recurses into sub-directories
lstTiff=[]
if os.path.isdir(self.tifFns):
getfilepath(self.tifFns, lstTiff)
elif os.path.isfile(self.tifFns):
# self.createJsonFromTiffFile(self.tifFns)
lstTiff.append(self.tifFns)
else:
print("input path or directory is error!")
if len(lstTiff)>0:
print(lstTiff)
self.createJsonFromTiffFiles(lstTiff)
def createJsonFromTiffFiles(self,tiffFns):
        # open the json file if it exists, otherwise start a fresh COCO skeleton
        if os.path.exists(self.jsonFn):
            with open(self.jsonFn) as json_file:
                self.coco_output = json.load(json_file)
else:
self.coco_output = {
"info": INFO,
"licenses": LICENSES,
"categories": CATEGORIES,
"images": [],
"annotations": []
}
annotation_idx = 1
for img_idx, tiffn in enumerate(tiffFns):
self.createJsonFromTiffFile(tiffn, img_idx+1, 1, annotation_idx + 10000 * img_idx)
with open(self.jsonFn, 'w') as output_json_file:
json.dump(self.coco_output, output_json_file)
def createJsonFromTiffFile(self, tiff_filepath, img_idx, band_no=1, annotation_idx=1):
print("Processing: ", tiff_filepath)
rasters = gdal.Open(tiff_filepath)
raster_array = raster2array(rasters,band_no)
#get size of image
img_Width = rasters.RasterXSize
img_Height = rasters.RasterYSize
img_size = [img_Width,img_Height]
#create image_info
tiff_filepath = os.path.join(self.lbltiffPath, os.path.basename(tiff_filepath))
image_info = self.create_image_info(img_idx,tiff_filepath,img_size)
self.coco_output["images"].append(image_info)
# create annotation
polygons = self.binaryMask2Polygon(raster_array)
for idx,polygon in enumerate(polygons):
# print(type(polygon), polygon.size)
if polygon.size > 7:
                category_info = {'id': 1, "is_crowd": 0}
                annotation_info = self.create_annotation_info(idx+annotation_idx,img_idx,category_info,polygon,img_size)
                self.coco_output["annotations"].append(annotation_info)
def binaryMask2Polygon(self,binaryMask):
polygons =[]
padded_binary_mask = np.pad(binaryMask, pad_width=1, mode='constant', constant_values=0)
contours = measure.find_contours(padded_binary_mask,0.5)
contours = np.subtract(contours, 1)
def closeContour(contour):
if not np.array_equal(contour[0], contour[-1]):
contour = np.vstack((contour, contour[0]))
return contour
for contour in contours:
contour = closeContour(contour)
contour = measure.approximate_polygon(contour, 1)
if len(contour)<3:
continue
contour = np.flip(contour,axis =1)
# segmentation = contour.ravel().tolist()
#
# # after padding and subtracting 1 we may get -0.5 points in our segmentation
# segmentation = [0 if i < 0 else i for i in segmentation]
# polygons.append(segmentation)
polygons.append(contour)
return polygons
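    # Example: a binary mask containing one filled square yields a single closed
    # contour; after approximate_polygon and the np.flip above, each returned
    # polygon is an (N, 2) array of (x, y) vertices in pixel coordinates.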
def create_image_info(self,image_id, file_name, image_size,
date_captured=datetime.datetime.utcnow().isoformat(' '),
license_id=1, coco_url="", flickr_url=""):
image_info = {
"id": image_id,
"file_name": file_name,
"width": image_size[0],
"height": image_size[1],
"date_captured": date_captured,
"license": license_id,
"coco_url": coco_url,
"flickr_url": flickr_url
}
return image_info
def create_annotation_info(self,annotation_id, image_id, category_info, segmentation,
image_size=None, tolerance=2, bounding_box=None):
        annotation_info = None
        try:
polygon = Polygon(np.squeeze(segmentation))
# print(type(polygon))
area =polygon.area
segmentation = segmentation.ravel().tolist()
# # after padding and subtracting 1 we may get -0.5 points in our segmentation
bbx =[0 if i < 0 else int(i) for i in list(polygon.bounds)]
segmentation = [0 if i < 0 else int(i) for i in segmentation]
annotation_info = {
"id": annotation_id,
"image_id": image_id,
"category_id": category_info["id"],
"iscrowd": category_info["is_crowd"],
"area": area,
"bbox": bbx,
"segmentation": [segmentation],
"width": image_size[0],
"height": image_size[1],
}
except Exception as e:
print("Error in create_annotation_info():", e)
return annotation_info
# test = czhTiff2Json("D:\\2019\\njit learning\\201909\\sidewalk extract\\czhSidewalkExtract\\val\\label","D:\\2019\\njit learning\\201909\\sidewalk extract\\czhSidewalkExtract\\val\\images\\","D:\\2019\\njit learning\\201909\\sidewalk extract\\czhSidewalkExtract\\val\\label\\sidewalk_val.json")
test = czhTiff2Json(r"L:\Datasets\AIRS\val\labels", r"L:\Datasets\AIRS\val\images", r"L:\Datasets\AIRS\val\AIRS_val.json")
# test.createJson()
| null |
czhUtils.py
|
czhUtils.py
|
py
| 10,600 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "osgeo.gdalnumeric.fromstring",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "osgeo.gdalnumeric",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal.Open",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "skimage.measure.find_contours",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "skimage.measure",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "numpy.subtract",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "numpy.array_equal",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "skimage.measure.approximate_polygon",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "skimage.measure",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "numpy.flip",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "shapely.geometry.Polygon",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 324,
"usage_type": "call"
}
] |
453111214
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import os, re
import markdown as md
from constants import places
from utilities import indent, renderDate
class Post(object):
'''
Object that holds information about a post. Can render itself into HTML.
'''
def __init__(self, filename, title, tags, kind, body):
super(Post, self).__init__()
self.title = title
self.tags = tags
self.kind = kind
self.setBody(body)
self.filename = filename
self.template = None
self.datePublished = None
self.url = None
self.link = None
def setBody(self, body):
self.body = body
self.bodyHTML = md.markdown(self.body)
def setTemplate(self, filename):
templatePath = os.path.join('templates', filename) + '.html'
if os.path.exists(templatePath):
with open(templatePath) as templateFile:
self.template = templateFile.readlines()
else:
raise Exception('Template file "%s" does not exist.'%(filename))
    def markdown(self):
        # use a local name that does not shadow the markdown module imported as md
        text = '---\n'
        text += 'title: %s\n'%(self.title)
        text += 'tags: %s\n'%(', '.join(self.tags))
        text += 'kind: %s\n'%(self.kind)
        if self.link:
            text += 'link: %s\n'%(self.link)
        if self.datePublished:
            text += 'published: %s\n'%(self.datePublished)
        text += '---\n\n'
        text += self.body
        return text
def render(self):
tagsHTML = ', '.join(['<a href="%s%s">%s</a>'%(places('tags'), t, t) for t in self.tags]) if len(self.tags) > 0 else ''
if self.template:
copy = [] + self.template
if self.datePublished:
publishDate = self.datePublished
else:
publishDate = renderDate()
for i in range(len(self.template)):
copy[i] = re.sub(r'::TITLE::', self.title, copy[i])
copy[i] = re.sub(r'::DATE::', publishDate, copy[i])
copy[i] = re.sub(r'::BASE_URL::', places('root'), copy[i])
url = self.url if self.url else '#'
if self.link:
copy[i] = re.sub(r'::LINK_URL::', self.link, copy[i])
else:
copy[i] = re.sub(r'::LINK_URL::', r'::POST_URL::', copy[i])
copy[i] = re.sub(r'::POST_URL::', url, copy[i])
if re.search(r'::TAGS::', copy[i]):
if len(tagsHTML) > 0:
copy[i] = re.sub(r'::TAGS::', tagsHTML, copy[i])
else:
copy[i] = ''
if re.search(r'::BODY::', copy[i]):
copy[i] = indent(self.bodyHTML, copy[i].count('\t'))
html = ''.join(copy)
copy = html.split('\n')
for i in range(len(copy)):
if re.search(r'<p><img alt=".*" src=".*" /></p>', copy[i]):
numTabs = copy[i].count('\t')
components = copy[i].split('"')
altText = '"'.join(components[1:-3])
src = '"'.join(components[-2:-1])
figure = '<figure>\n\t<img alt="%s" src="%s" />\n\t<figcaption>%s</figcaption>\n</figure>'%(altText, src, altText)
copy[i] = indent(figure, numTabs)
return '\n'.join(copy)
return self.bodyHTML
def renderTo(self, folderName):
fileDirectory = os.path.dirname(self.filename)
fileNameMD = os.path.basename(self.filename)
        fileNameHTML = re.sub(r'\.md$', '.html', fileNameMD)
# fileNameHTML = re.sub(r'\s', r'-', self.title.encode('utf8').lower().strip('?')) + '.html'
filePath = os.path.join(fileDirectory, folderName, fileNameHTML)
if not os.path.exists(os.path.dirname(filePath)):
raise Exception('Output folder "%s" does not exist.'%(os.path.dirname(filePath)))
with open(filePath, 'w') as outfile:
outfile.write(self.render())
def saveTo(self, folderName):
fileDirectory = os.path.dirname(self.filename)
fileName = os.path.basename(self.filename)
filePath = os.path.join(folderName, fileName)
if not os.path.exists(os.path.dirname(filePath)):
raise Exception('Output folder "%s" does not exist.'%(os.path.dirname(filePath)))
with open(filePath, 'w') as outfile:
outfile.write(self.markdown())
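# Usage sketch (hypothetical paths; assumes a 'templates/post.html' template and
# an existing 'posts/html' output folder):
#     post = Post('posts/hello.md', 'Hello', ['intro'], 'article', 'Hello **world**')
#     post.setTemplate('post')
#     post.renderTo('html')    # writes posts/html/hello.html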
| null |
engine/post.py
|
post.py
|
py
| 4,478 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "markdown.markdown",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "constants.places",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "utilities.renderDate",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "constants.places",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "utilities.indent",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "utilities.indent",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 105,
"usage_type": "attribute"
}
] |
338598408
|
import os
import re
import time
import numpy as np
from subprocess import Popen, PIPE
from ipyparallel import Client
from ram.aws.stream_reader import NonBlockingStreamReader as NBSR
aws_data = os.path.join(os.getenv('DATA'), 'ram', 'aws')
class ClusterController(object):
configured_regions = ['us-west-1a', 'us-west-2b']
aws_config_base = os.path.join(os.getenv('GITHUB'), 'ram', 'ram', 'aws',
'config')
def __init__(self):
# check that starcluster is installed and connectivity
assert self.verify_sc_setup()
self.islive = False
def verify_sc_setup(self):
# verify base config exists
assert os.path.exists(self.aws_config_base)
# verify starcluster installed on host machine
_ = self._star_cmd("starcluster --help")
# verify aws connection with base config
self.get_live_clusters()
return True
def set_config(self, n_nodes, region=None, inst_type='m3.medium'):
'''
Create a config for a particular cluster instance. Fill in fields
for instance_type, region, n_nodes, and spot bid. Write the
config out to /data/ram/aws/configs.
'''
self._n_nodes = n_nodes
regionbid = self.get_region_bid(inst_type, region)
if regionbid is None:
return
else:
self._region = regionbid[0]
bid = regionbid[1]
# Read in base config
with open(self.aws_config_base, 'r') as file:
configdata = file.read()
# Configure and save as new
inst_config = os.path.join(aws_data, 'configs',
'config_{0}'.format(self._region))
instdata = re.sub(r'CLUSTER_SIZE\s=\s\d+',
'CLUSTER_SIZE = {0}'.format(n_nodes),
configdata)
instdata = re.sub(r'NODE_INSTANCE_TYPE.*?\n',
'NODE_INSTANCE_TYPE = {0}\n'.format(inst_type),
instdata)
instdata = re.sub(r'SPOT_BID.*?\n',
'SPOT_BID = {0}\n'.format(bid),
instdata)
with open(inst_config, 'w') as file:
file.write(instdata)
self._config = inst_config
return
def get_region_bid(self, inst_type, region=None):
'''
Selects an available region based on configured_regions and any
live clusters. Prompts user for confirmation and returns a spot bid
'''
clusters = self.get_live_clusters()
inuse = [c['Zone'] for c in clusters.itervalues()]
avail = list(set(self.configured_regions) - set(inuse))
if len(avail) == 0:
print('All available regions are in use')
return
region = region if region is not None else avail[0]
if region not in avail:
print('Unable to provision region {0}'.format(region))
return
spot_info = self.get_spot_history(inst_type, region)
conf = raw_input(('{0} in {1}: live={2}, avg={3}, max={4}.\n\n'
'Enter max bid, or enter x to exit: ').format(
inst_type, region, spot_info['live'],
spot_info['avg'], spot_info['max']))
if conf in ['n','x']:
return
return (region, str(float(conf)))
def launch_cluster(self):
'''
Launch cluster and wait for completion
'''
assert os.path.exists(self._config)
cmd = 'starcluster -c {0} -r {1} start -c {2} {2}'.format(
self._config, self._region[:-1], self._region)
print('Launching Cluster')
stdout = self._star_cmd(cmd)
print('Cluster Launched Successfully')
self.islive = True
# Setup parallel client
ipy_json = re.findall(r"client\s=\sClient\('(.*?)'", stdout)[0]
ipy_ssh = re.findall(r"sshkey='(.*?)'", stdout)[0]
self._ipyconfig = (ipy_json, ipy_ssh)
self.client = Client(ipy_json, sshkey=ipy_ssh)
return
def get_live_clusters(self):
'''
Return information on currently running clusters. Outputs a
dictionary keyed on cluster name
'''
all_regions = np.unique([x[:-1] for x in self.configured_regions])
stdoutA = str()
for region in all_regions:
cmd = "starcluster -c {0} -r {1} lc".format(self.aws_config_base,
region)
stdout = self._star_cmd(cmd)
stdoutA += '\r\n' + stdout
# Select cluster info items
Name = re.findall(r'-+\r\n(.*?)\s\(', stdoutA)
LaunchTime = re.findall(r'Launch\stime:\s(.*?)\r\n', stdoutA)
Uptime = re.findall(r'Uptime:\s(.*?)\r\n', stdoutA)
VPC = re.findall(r'VPC:\s(.*?)\r\n', stdoutA)
Subnet = re.findall(r'Subnet:\s(.*?)\r\n', stdoutA)
Zone = re.findall(r'Zone:\s(.*?)\r\n', stdoutA)
Keypair = re.findall(r'Keypair:\s(.*?)\r\n', stdoutA)
EbsVolumes = re.findall(r'EBS\svolumes:([\s\S]*?)Spot\srequests',
stdoutA)
SpotRequests = re.findall(r'Spot\srequests:\s(.*?)\r\n', stdoutA)
ClusterNodes = re.findall(r'Cluster\snodes:\r\n([\s\S]*?)' +
'Total\snodes', stdoutA)
TotalNodes = re.findall(r'Total\snodes:\s(.*?)\r\n', stdoutA)
# Package into dict
clusters = {}
for i, name in enumerate(Name):
clusters[name] = {}
clusters[name]['LaunchTime'] = LaunchTime[i]
clusters[name]['Uptime'] = Uptime[i]
clusters[name]['VPC'] = VPC[i]
clusters[name]['Subnet'] = Subnet[i]
clusters[name]['Zone'] = Zone[i]
clusters[name]['Keypair'] = Keypair[i]
clusters[name]['EbsVolumes'] = re.findall(r'\s+(.*)?\r\n',
EbsVolumes[i])
clusters[name]['SpotRequests'] = SpotRequests[i]
clusters[name]['ClusterNodes'] = re.findall(r'\s+(.*)?\r\n',
ClusterNodes[i])
clusters[name]['TotalNodes'] = TotalNodes[i]
return clusters
def get_spot_history(self, inst_type, region):
        '''
Return spot price information for instance & region
'''
cmd = "starcluster -c {0} -r {1} spothistory -z {2} -c {3}".format(
self.aws_config_base, region[:-1], region, inst_type)
stdout = self._star_cmd(cmd)
pLive = re.findall(r'Current\sprice:\s\$([\.\d]*?)\r\n', stdout)
pMax = re.findall(r'Max\sprice:\s\$([\.\d]*?)\r\n', stdout)
pAvg = re.findall(r'Average\sprice:\s\$([\.\d]*?)\r\n', stdout)
return {'region': region, 'instance': inst_type,
'live': float(pLive[0]), 'max': float(pMax[0]),
'avg': float(pAvg[0])}
def run_parallel(self, function, iterable):
'''
Run function across a live cluster. Iterable must be list-like
variable whose items can be passed to function.
'''
assert hasattr(self, 'client')
lv = self.client.load_balanced_view()
results = lv.map(function, iterable)
p_ix = -1
# Report progress
while not results.ready():
ix = float(results.progress)
if ix > p_ix:
print('{0}% Complete'.format(
str(np.round(ix/len(iterable), 2) *100)))
time.sleep(60)
p_ix = ix
return results
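    # Usage sketch (assumes a live cluster and that `work` is importable on the
    # engine nodes):
    #     def work(x):
    #         return x * x
    #     results = cc.run_parallel(work, range(100))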
def put_file(self, local_path, remote_path='/ramdata/temp'):
'''
Copy file to cluster, default is /ramdata/temp
'''
assert os.path.exists(local_path)
assert self.islive
cmd = 'starcluster -r {0} -c {1} put {2} {3} {4}'.format(
self._region[:-1], self._config, self._region, local_path,
remote_path)
stdout = self._star_cmd(cmd)
return
def get_file(self, local_path, remote_path):
'''
Copy file to cluster, will be placed under /ramdata
'''
assert os.path.exists(local_path)
assert self.islive
cmd = 'starcluster -r {0} -c {1} get {2} {3} {4}'.format(
self._region[:-1], self._config, self._region, remote_path,
local_path)
stdout = self._star_cmd(cmd)
return
def teardown(self):
'''
Shutdown cluster and cleanup instance variables
'''
if self.islive is False:
print('No live cluster recognized. Please shutdown explicitly')
return
if hasattr(self, 'client'):
self.client.close()
self.shutdown_cluster(self._region)
self._config = None
self._region = None
self._ipyconfig = None
self._n_nodes = None
return
def restart_cluster(self):
'''
Reboot cluster
'''
assert self.islive
self.client.close()
cmd = 'starcluster -r {0} -c {1} restart {2}'.format(
self._region[:-1], self._config, self._region)
stdout = self._star_cmd(cmd)
self.client = Client(self._ipyconfig[0], sshkey=self._ipyconfig[1])
return
def shutdown_cluster(self, cluster_name):
'''
Shutdown a cluster
'''
cmd = 'starcluster -c {0} -r {1} terminate -f {2}'.format(
self.aws_config_base, cluster_name[:-1], cluster_name)
stdout = self._star_cmd(cmd, 'y')
# Delete config if exists
config_path = os.path.join(aws_data, 'configs',
'config_{0}'.format(cluster_name))
if os.path.exists(config_path):
os.remove(config_path)
print('Cluster shutdown complete')
return
def _star_cmd(self, cmd, stdin_txt=None):
'''
Run a command in subprocess, handle errors and return output from
stdout
'''
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = proc.communicate(stdin_txt)
        if proc.poll() != 0:
            print(stderr)
            raise Exception('starcluster command failed: {0}'.format(cmd))
return stdout
def pull_git_branch(self, repo, branch):
'''
Pulls a branch from github in the specified repository. Reboots cluster
at end of function to ensure cluster is functioning.
'''
assert self.islive
nodes = self.get_live_clusters()[self._region]['ClusterNodes']
assert len(nodes) == self._n_nodes
for i in range(self._n_nodes):
nodei = 'master' if i == 0 else 'node00{0}'.format(i)
cmd = 'starcluster -r {0} -c {1} sn {2} {3} -u ubuntu'.format(
self._region[:-1], self._config, self._region, nodei)
# Open subprocess ssh to cluster node
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
nbsr = NBSR(proc.stdout)
nbsr.readuntil(timeout=10)
# Change directory
proc.stdin.write('cd ~/ramsrc/{} \n'.format(repo))
nbsr.readuntil(re_error=r'No\ssuch\sfile\sor\sdirectory')
# /home is NFS shared accross cluster, pull only once on master
if nodei == 'master':
# Fetch new changes in repo
proc.stdin.write('git fetch \n')
nbsr.readuntil(timeout=7, re_error=r'error')
# Checkout desired branch
proc.stdin.write('git checkout {} \n'.format(branch))
nbsr.readuntil(re_error=r'error')
# Pull latest changes
proc.stdin.write('git pull \n')
nbsr.readuntil(timeout=7, re_error=r'error')
# Update python libs on all nodes
proc.stdin.write('sudo python setup.py install \n')
nbsr.readuntil(timeout=7,
re_error=r'No\ssuch\sfile\sor\sdirectory')
# Exit ssh session
proc.stdin.write('exit \n')
nbsr.readuntil()
print('/{1}/{2} updated on node: {0}'.format(nodei, repo, branch))
print('Rebooting Cluster')
self.restart_cluster()
if __name__ == '__main__':
cc = ClusterController()
cc.set_config(2)
# cc.launch_cluster()
cc.teardown()
| null |
ram/aws/cluster.py
|
cluster.py
|
py
| 12,464 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "ipyparallel.Client",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "ipyparallel.Client",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "ram.aws.stream_reader.NonBlockingStreamReader",
"line_number": 294,
"usage_type": "call"
}
] |
128409060
|
from functools import lru_cache
import boto3
import itertools
from collections import defaultdict
from operator import itemgetter
from typing import List, MutableMapping
from django.conf import settings
from rest_framework.fields import (
BooleanField, CharField, ChoiceField, DateField, DecimalField, EmailField, IntegerField,
UUIDField,
)
from extended_choices import Choices
import yaml
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, transaction
def filter_key(dict_, key_to_remove):
return {k: v for k, v in dict_.items() if k != key_to_remove}
def group_by_key(l: List[MutableMapping], key: str, flatten: bool = False) -> MutableMapping:
"""
:param l: list of dicts .e.g [{'a': 1, 'b': 1}, {'b': 2, 'a': 2}]
:param dict_key: the dict key to group by
:return: a dict with keys and an object or list of objects in the format:
{1: [{'b': 1}], 2: [{'b': 2}]} or if flatten=True {1: {'b': 1}, 2: {'b': 2}}
"""
key_getter = itemgetter(key)
l.sort(key=key_getter)
groups = defaultdict(list)
for group, vals in itertools.groupby(l, key=key_getter):
groups[group] = [filter_key(data, key) for data in vals]
return {k: v[0] if flatten else v for k, v in groups.items()}
def getitem_or_default(l, idx, default=None):
"""
gets the item at position idx or returns the default value
:param list: list of things
:param idx: position
:param default: optional default value
:return: thing at index idx or default
"""
try:
return l[idx]
except IndexError:
return default
class TrackedSupersetChoices(Choices):
"""
Same as a normal Choices object except subsets have access to
their superset.
"""
def add_subset(self, name, constants):
super(TrackedSupersetChoices, self).add_subset(name, constants)
subset = getattr(self, name)
subset.superset = self
def get_bucket_credentials(bucket_id):
"""Get S3 credentials for bucket id."""
if bucket_id not in settings.DOCUMENT_BUCKETS:
raise Exception(f'Bucket "{bucket_id}" not configured.')
return settings.DOCUMENT_BUCKETS[bucket_id]
def get_bucket_name(bucket_id):
"""Get bucket name for given bucket id."""
return get_bucket_credentials(bucket_id)['bucket']
@lru_cache()
def get_s3_client_for_bucket(bucket_id):
"""Get S3 client for bucket id."""
credentials = get_bucket_credentials(bucket_id)
return boto3.client(
's3',
aws_access_key_id=credentials['aws_access_key_id'],
aws_secret_access_key=credentials['aws_secret_access_key'],
region_name=credentials['aws_region'],
config=boto3.session.Config(signature_version='s3v4'),
)
def parse_bool(value):
"""Parses a boolean value from a string."""
return _parse_value(value, BooleanField())
def parse_date(value):
"""Parses a date from a string."""
return _parse_value(value, DateField())
def parse_decimal(value, max_digits=19, decimal_places=2):
"""Parses a decimal from a string."""
return _parse_value(value, DecimalField(max_digits, decimal_places))
def parse_email(value):
"""Parses an email address from a string."""
return _parse_value(value, EmailField(), blank_value='')
def parse_uuid(value):
"""Parses a UUID from a string."""
return _parse_value(value, UUIDField())
def parse_int(value):
"""Parses a integer from a string."""
return _parse_value(value, IntegerField())
def parse_uuid_list(value):
"""Parses a comma-separated list of UUIDs from a string."""
return _parse_list(value, UUIDField())
def parse_int_list(value):
"""Parses a comma-separated list of Integers from a string."""
return _parse_list(value, IntegerField())
def parse_choice(value, choices, blank_value=''):
"""Parses and validates a value from a list of choices."""
return _parse_value(value, ChoiceField(choices=choices), blank_value=blank_value)
def parse_limited_string(value, max_length=settings.CHAR_FIELD_MAX_LENGTH):
"""Parses/validates a string."""
return _parse_value(value, CharField(max_length=max_length), blank_value='')
def _parse_value(value, field, blank_value=None):
if not value or value.lower().strip() == 'null':
return blank_value
field.run_validation(value)
return field.to_internal_value(value)
def _parse_list(value, field):
"""Parses a comma-separated list of UUIDs from a string."""
if not value or value.lower().strip() == 'null':
return []
return [field.to_internal_value(item) for item in value.split(',')]
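# Example: parse_int_list('1,2,3') returns [1, 2, 3], while parse_int_list('')
# and parse_int_list('null') both return [].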
def _build_model_data(model, obj_pk, fields_data, using):
data = {}
# Handle each field
for (field_name, field_value) in fields_data.items():
field = model._meta.get_field(field_name)
# Handle many-to-many relations
if field.many_to_many:
raise NotImplementedError('Many-to-many fields not supported')
# Handle one-to-many relations
if field.one_to_many:
raise NotImplementedError('One-to-many fields not supported')
# Handle fk fields
if field.many_to_one:
try:
value = base.deserialize_fk_value(field, field_value, using, False)
except Exception as exc:
raise base.DeserializationError.WithData(
exc,
model._meta.model_name,
obj_pk,
field_value,
) from exc
data[field.attname] = value
# Handle all other fields
else:
try:
data[field.name] = field.to_python(field_value)
except Exception as exc:
raise base.DeserializationError.WithData(
exc,
model._meta.model_name,
obj_pk,
field_value,
) from exc
return data
def _load_data_in_migration(apps, object_list, using=DEFAULT_DB_ALIAS):
for list_item in object_list:
obj_pk = list_item.get('pk')
assert obj_pk, 'pk field required'
model_label = list_item['model']
model = apps.get_model(model_label)
fields_data = list_item['fields']
model_data = _build_model_data(model, obj_pk, fields_data, using)
model.objects.update_or_create(pk=obj_pk, defaults=model_data)
@transaction.atomic
def load_yaml_data_in_migration(apps, fixture_file_path):
"""
Loads the content of the yaml file `fixture_file_path` into the database.
This is similar to `loaddata` but:
- it's safe to be used in migrations
- it does not change the fields that are not present in the yaml
Motivation:
Calling `loaddata` from a data migration makes django use the latest version
of the models instead of the version at the time of that particular migration.
This causes problems e.g. adding a new field to a model which had a data migration
in the past is okay but migrating from zero fails as the model in
loaddata (the latest) has a field that did not exist at that migration time.
Limitations:
- Many-to-many fields are not supported yet
- all items in the yaml have to include a pk field
"""
with open(fixture_file_path, 'rb') as fixture:
object_list = yaml.safe_load(fixture)
_load_data_in_migration(apps, object_list)
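# Usage sketch inside a data migration (hypothetical fixture path):
#     def forwards(apps, schema_editor):
#         load_yaml_data_in_migration(apps, 'core/fixtures/initial_data.yaml')
#     class Migration(migrations.Migration):
#         operations = [migrations.RunPython(forwards, migrations.RunPython.noop)]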
| null |
core/utils.py
|
utils.py
|
py
| 7,467 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.List",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.MutableMapping",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "operator.itemgetter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "itertools.groupby",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "extended_choices.Choices",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DOCUMENT_BUCKETS",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.DOCUMENT_BUCKETS",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "boto3.client",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "boto3.session.Config",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "boto3.session",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "functools.lru_cache",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "rest_framework.fields.BooleanField",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "rest_framework.fields.DateField",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "rest_framework.fields.DecimalField",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "rest_framework.fields.EmailField",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "rest_framework.fields.UUIDField",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "rest_framework.fields.IntegerField",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "rest_framework.fields.UUIDField",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "rest_framework.fields.IntegerField",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "rest_framework.fields.ChoiceField",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.CHAR_FIELD_MAX_LENGTH",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "rest_framework.fields.CharField",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "django.core.serializers.base.deserialize_fk_value",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "django.core.serializers.base",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "django.core.serializers.base.DeserializationError.WithData",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "django.core.serializers.base.DeserializationError",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "django.core.serializers.base",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "django.core.serializers.base.DeserializationError.WithData",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "django.core.serializers.base.DeserializationError",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "django.core.serializers.base",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "django.db.DEFAULT_DB_ALIAS",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "yaml.safe_load",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "django.db.transaction.atomic",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "django.db.transaction",
"line_number": 211,
"usage_type": "name"
}
] |
429678554
|
import random
import json
"""
1. Создать программно файл в текстовом формате, записать в него построчно данные, вводимые пользователем.
Об окончании ввода данных свидетельствует пустая строка.
"""
print("*" * 50 + " 1 " + "*" * 50)
with open('5_1.txt', 'w') as file:
name = input('Введите свое имя: ')
file.write(name + '\n')
last_name = input('Введите свою фамилию: ')
file.write(last_name + '\n')
"""
2. Создать текстовый файл (не программно), сохранить в нем несколько строк,
выполнить подсчет количества строк, количества слов в каждой строке.
"""
print("*" * 50 + " 2 " + "*" * 50)
with open('5_2.txt', 'r', encoding='utf-8') as file_1:
print(f"Количество строк в файле: {sum(1 for line in file_1)}")
with open('5_2.txt', 'r', encoding='utf-8') as file_2:
something_list = 0
for line in file_2:
something_list += len(line.split())
print(f"Количество слов в файле: {something_list}")
"""
3. Создать текстовый файл (не программно), построчно записать фамилии сотрудников и величину их окладов (не менее 10 строк).
Определить, кто из сотрудников имеет оклад менее 20 тыс., вывести фамилии этих сотрудников.
Выполнить подсчет средней величины дохода сотрудников.
Пример файла:
Иванов 23543.12
Петров 13749.32
"""
print("*" * 50 + " 3 " + "*" * 50)
empl_dict = {}
with open('5_3.txt', 'r', encoding='utf-8') as file_3:
for line in file_3:
empl_dict[line.split()[0]] = float(line.split()[1])
if float(line.split()[1]) < 20000:
print(f"Зарплата меньше 20000 у {line.split()[0]}: {float(line.split()[1])}")
max_salary = max(empl_dict.values())
employee = {k:v for k, v in empl_dict.items() if v == max_salary}
print(employee)
"""
4. Создать (не программно) текстовый файл со следующим содержимым:
One — 1
Two — 2
Three — 3
Four — 4
Необходимо написать программу, открывающую файл на чтение и считывающую построчно данные.
При этом английские числительные должны заменяться на русские.
Новый блок строк должен записываться в новый текстовый файл.
"""
print("*" * 50 + " 4 " + "*" * 50)
f = open('5_result.txt', 'w')
f.close()
with open('5_4.txt', 'r', encoding='utf-8') as file_4:
for line in file_4:
if 'One' in line:
line = line.replace('One', 'Один')
elif 'Two' in line:
line = line.replace('Two', 'Два')
elif 'Three' in line:
line = line.replace('Three', 'Три')
elif 'Four' in line:
line = line.replace('Four', 'Четые')
with open('5_result.txt', 'a', encoding='utf-8') as file_4_:
file_4_.write(line)
"""
5. Создать (программно) текстовый файл, записать в него программно набор чисел, разделенных пробелами.
Программа должна подсчитывать сумму чисел в файле и выводить ее на экран.
"""
print("*" * 50 + " 5 " + "*" * 50)
f = open('5_5.txt', 'w')
f.close()
rand_line = []
for i in range(1, 21):
rand_line.append(int(random.random() * 100))
with open('5_5.txt', 'a', encoding='utf-8') as file_5:
for i in rand_line:
file_5.write(str(i) + " ")
file_5.close()
with open('5_5.txt', 'r', encoding='utf-8') as file_5_2:
list_from_file = []
for line in file_5_2:
list_from_file += line.split()
print(f"Сумма чисел в файле: {sum(int(i) for i in list_from_file)}")
"""
6. Необходимо создать (не программно) текстовый файл, где каждая строка описывает учебный предмет и наличие лекционных,
практических и лабораторных занятий по этому предмету и их количество.
Важно, чтобы для каждого предмета не обязательно были все типы занятий.
Сформировать словарь, содержащий название предмета и общее количество занятий по нему.
Вывести словарь на экран.
Примеры строк файла: Информатика: 100(л) 50(пр) 20(лаб).
Физика: 30(л) — 10(лаб)
Физкультура: — 30(пр) —
Пример словаря: {“Информатика”: 170, “Физика”: 40, “Физкультура”: 30}
"""
print("*" * 50 + " 6 " + "*" * 50)
lessons_dict = {}
with open('5_6.txt', 'r', encoding='utf-8') as file_6:
for line in file_6:
sum = 0
for i in range(1, 4):
line_new = 0
try:
line_new = line.split()[i].split("(")
sum += int(line_new[0])
except (ValueError, TypeError):
pass
lessons_dict[line.split()[0]] = sum
print(lessons_dict)
"""
7. Создать вручную и заполнить несколькими строками текстовый файл,
в котором каждая строка должна содержать данные о фирме: название, форма собственности, выручка, издержки.
Пример строки файла: firm_1 ООО 10000 5000.
Необходимо построчно прочитать файл, вычислить прибыль каждой компании, а также среднюю прибыль.
Если фирма получила убытки, в расчет средней прибыли ее не включать.
Далее реализовать список. Он должен содержать словарь с фирмами и их прибылями, а также словарь со средней прибылью.
Если фирма получила убытки, также добавить ее в словарь (со значением убытков).
Пример списка: [{“firm_1”: 5000, “firm_2”: 3000, “firm_3”: 1000}, {“average_profit”: 2000}].
Итоговый список сохранить в виде json-объекта в соответствующий файл.
Пример json-объекта:
[{"firm_1": 5000, "firm_2": 3000, "firm_3": 1000}, {"average_profit": 2000}]
Подсказка: использовать менеджер контекста.
"""
print("*" * 50 + " 7 " + "*" * 50)
firm_dict = {}
with open('5_7.txt', 'r', encoding='utf-8') as file_7:
for line in file_7:
firm_info = line.replace('\n', '').split(';')
firm_dict[firm_info[0]] = float(firm_info[1]) - float(firm_info[2])
print(f"Прибыть кадой компании: {firm_dict}")
print(f"Средняя прибыль компаний: {sum(firm_dict.values()) / len(firm_dict.keys())}")
data_for_json = {'income': firm_dict, 'average_profit': sum(firm_dict.values()) / len(firm_dict.keys())}
print(data_for_json)
with open("my_file_.json", "w", encoding='utf-8') as write_f:
json.dump(data_for_json, write_f)
| null |
lesson_5.py
|
lesson_5.py
|
py
| 7,956 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "random.random",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 167,
"usage_type": "call"
}
] |
502123428
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import paho.mqtt.client as mqtt
import subprocess
import time
import re
import configparser as ConfigParser
import threading
import os
# Default configuration
config = {
'mqtt': {
'broker': 'localhost',
'port': 1883,
'prefix': 'media',
'user': os.environ.get('MQTT_USER'),
'password': os.environ.get('MQTT_PASSWORD'),
},
'cec': {
'enabled': 0,
'id': 1,
'port': 'RPI',
'devices': '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15',
}
}
def mqtt_send(topic, value, retain=False):
mqtt_client.publish(topic, value, retain=retain)
def cec_on_message(level, time, message):
if level == cec.CEC_LOG_TRAFFIC:
m = re.search('>> [0-9a-f]{2}:44:([0-9a-f]{2})', message)
if m:
handleKeyPress(m.group(1))
return
m = re.search('>> [0-9a-f]{2}:8b:([0-9a-f]{2})', message)
if m:
handleKeyRelease(m.group(1))
return
def cec_send(cmd, id=None):
if id is None:
cec_client.Transmit(cec_client.CommandFromString(cmd))
else:
cec_client.Transmit(cec_client.CommandFromString('1%s:%s' % (hex(id)[2:], cmd)))
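# CEC <User Control> key codes handled below, per the HDMI-CEC user-control table:
# 0x41 = Volume Up, 0x42 = Volume Down, 0x43 = Mute.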
def translateKey(key):
localKey = None
if key == "41":
localKey = "volumeup"
elif key == "42":
localKey = "volumedown"
elif key == "43":
localKey = "volumemute"
return localKey
def handleKeyPress(key):
remoteKey = translateKey(key)
if remoteKey == None:
return
mqtt_send(config['mqtt']['prefix'] + '/cec/' + remoteKey, 'on', True)
def handleKeyRelease(key):
remoteKey = translateKey(key)
if remoteKey == None:
return
mqtt_send(config['mqtt']['prefix'] + '/cec/' + remoteKey, 'off', True)
def cec_refresh():
try:
for id in config['cec']['devices'].split(','):
cec_send('8F', id=int(id))
except Exception as e:
print("Error during refreshing: ", str(e))
def cleanup():
mqtt_client.loop_stop()
mqtt_client.disconnect()
import signal
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.kill_now = True
try:
### Parse config ###
killer = GracefulKiller()
try:
Config = ConfigParser.SafeConfigParser()
if Config.read("/home/pi/scripts/config.ini"):
# Load all sections and overwrite default configuration
for section in Config.sections():
config[section].update(dict(Config.items(section)))
# Environment variables
for section in config:
for key, value in config[section].items():
env = os.getenv(section.upper() + '_' + key.upper());
if env:
config[section][key] = type(value)(env)
# Do some checks
if not int(config['cec']['enabled']) == 1:
raise Exception('CEC is disabled. Can\'t continue.')
except Exception as e:
print("ERROR: Could not configure:", str(e))
exit(1)
### Setup CEC ###
if int(config['cec']['enabled']) == 1:
print("Initialising CEC...")
try:
import cec
global repeatingKey
repeatingKey = None
cec_config = cec.libcec_configuration()
cec_config.strDeviceName = "cec-audio"
cec_config.bActivateSource = 0
cec_config.deviceTypes.Add(cec.CEC_DEVICE_TYPE_AUDIO_SYSTEM)
cec_config.clientVersion = cec.LIBCEC_VERSION_CURRENT
cec_config.SetLogCallback(cec_on_message)
cec_client = cec.ICECAdapter.Create(cec_config)
if not cec_client.Open(config['cec']['port']):
raise Exception("Could not connect to cec adapter")
cec_send("50:72:01:00")
cec_send("50:7E:01:00")
except Exception as e:
print("ERROR: Could not initialise CEC:", str(e))
exit(1)
### Setup MQTT ###
print("Initialising MQTT...")
mqtt_client = mqtt.Client("cec-ir-mqtt")
if config['mqtt']['user']:
mqtt_client.username_pw_set(config['mqtt']['user'], password=config['mqtt']['password']);
mqtt_client.connect(config['mqtt']['broker'], int(config['mqtt']['port']), 60)
mqtt_client.loop_start()
print("Starting main loop...")
while True:
time.sleep(10)
if killer.kill_now:
break
except KeyboardInterrupt:
cleanup()
except RuntimeError:
cleanup()
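# Illustrative sketch (added commentary, not part of the original bridge): the
# "Environment variables" block above coerces overrides with type(value)(env),
# so e.g. MQTT_PORT=1884 stays an int because the default port is an int. A
# standalone demonstration with hypothetical names; the None guard covers
# defaults such as 'user' that may start out unset.
def _demo_env_override(defaults, environ):
    for section in defaults:
        for key, value in defaults[section].items():
            env = environ.get(section.upper() + '_' + key.upper())
            if env is not None and value is not None:
                defaults[section][key] = type(value)(env)
    return defaults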
| null |
bridge.py
|
bridge.py
|
py
| 4,700 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "signal.signal",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "signal.SIGINT",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "signal.signal",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "signal.SIGTERM",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "configparser.SafeConfigParser",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "cec.libcec_configuration",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "cec.CEC_DEVICE_TYPE_AUDIO_SYSTEM",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "cec.LIBCEC_VERSION_CURRENT",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "cec.ICECAdapter.Create",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "cec.ICECAdapter",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "paho.mqtt.client.Client",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "paho.mqtt.client",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 162,
"usage_type": "call"
}
] |
288915456
|
import sys
import os
import time
import datetime
import redis
import json
# Smykowski takes the specifications from the customers and brings them down to
# the software engineers.
# XXX: maybe these should be to stdout instead of stderr, I dunno
def debug(msg, force_debug=False):
if DEBUG or force_debug:
msg_output = u"{0}{1}\n".format(PID_PREFIX, msg)
sys.stderr.write(msg_output.encode('utf8'))
sys.stderr.flush()
def mention(msg):
msg_output = u"{0}{1}\n".format(PID_PREFIX, msg)
sys.stderr.write(msg_output.encode('utf8'))
sys.stderr.flush()
DEBUG = os.environ.get('UMAD_SMYKOWSKI_DEBUG')
PID_PREFIX = '[pid {0}] '.format(os.getpid())
def enqueue(dst_redis, queue_name, url):
try:
debug(u"About to insert {0} into {1}".format(url, queue_name))
pipeline = dst_redis.pipeline()
pipeline.zadd(queue_name, time.time(), url)
pipeline.lpush('barber', 'dummy_value')
pipeline.execute() # will return something like: [ {0|1}, num_dummies ]
mention(u"Successful insertion of {0} into {1}".format(url, queue_name))
except Exception as e:
mention(u"Something went boom while inserting {0}: {1}".format(url, e))
raise
def main(argv=None):
debug("Debug logging is enabled")
redis_server_host = os.environ.get('UMAD_REDIS_HOST', 'localhost')
redis_server_port = os.environ.get('UMAD_REDIS_PORT', 6379)
redis_server_db_src = os.environ.get('UMAD_REDIS_DB_SRC', 8)
redis_server_db_dst = os.environ.get('UMAD_REDIS_DB_DST', 0)
redis_server_src_key = os.environ.get('UMAD_REDIS_AWESANT_KEY', 'umad_event:queue')
src_redis = redis.StrictRedis(host=redis_server_host, port=int(redis_server_port), db=redis_server_db_src)
dst_redis = redis.StrictRedis(host=redis_server_host, port=int(redis_server_port), db=redis_server_db_dst)
# Get URLs out of Redis, look for interesting ones, put them into the
# indexing listener's queue. Make this very stupid, daemontools will
# restart us if the world explodes.
while True:
log_entry_json = src_redis.blpop(redis_server_src_key)[1] # Disregard the popped keyname
log_entry_dict = json.loads(log_entry_json)
log_message = log_entry_dict[u'@message']
(request_method, request_url) = log_message.split(None, 1)
#debug("Got a request with method {0} and URL of {1}".format(request_method, request_url))
# Skip junk
if any([
'/moin_static' in request_url, # there may be a numeric suffix on this before the next slash
'/gollum_static/' in request_url,
request_url.endswith('.js'),
request_url.endswith('.css'),
request_url.endswith( ('.jpg','.png','.gif') ),
request_url.endswith( ('.woff','.ttf','.otf') ),
request_url.endswith( ('/create-a-new-page','/new-page') ),
]):
continue
# Throw it in the queue
if request_url.startswith('https://docs.anchor.net.au/'):
# Our gollum wikis are special (ie. annoying). Gollum
# POSTs to URLs that don't really exists, so we have to
# derive the actual pagename ourself and call a fake
# POST on that. -_- To ensure we can tell the fakes
# from the real ones, we use INDEX as the method
			# instead.
if request_method in ('INDEX',):
enqueue(dst_redis, 'umad_indexing_queue', request_url)
else:
# nginx encodes URLs with backslash hex escapes, which
# we need to clean up. Now it's a Unicode object. No
# name-mangling is needed for docs.anchor because it's
# Gollum, which only allows for plain filenames/URLs.
request_url = request_url.decode('string_escape').decode('utf8')
# When the Python3 revolution arrives we'll need to do this:
# request_url = bytes(request_url, 'utf-8').decode('unicode_escape')
# http://stackoverflow.com/a/4020824
if request_method in ('POST', 'PUT'):
enqueue(dst_redis, 'umad_indexing_queue', request_url)
if request_method in ('DELETE',):
enqueue(dst_redis, 'umad_deletion_queue', request_url)
# Make a note that we saw a heartbeat. We'd like to keep all
# the hits we've seen in the last N minutes (eg. 5min), but
# Redis doesn't export expiry of list elements, only the whole
# key. Instead we use a sorted set to associate a timestamp
# with each heartbeat ping, making it easy for us to grab only
# recent pings for inspection, and delete old ones once their
# usefulness has passed.
if request_method == 'PING':
# This is made somewhat harder because we want to keep
# a numeric range of timestamps, not a quantity of
# them. Sorted sets look like the only way to do it.
# By way of example, assume that each backend pings
# every 5min, and we'd like to keep the last 30min
# worth of ping timestamps. We *could* keep a list of
# ping timestamps and truncate to len=100 all the time,
# but that doesn't assure a sane distribution of
# timestamps and tell us what we *really* want to know.
# Because sets are unique, I think I'll need to use
# dummy key names, and keep the timestamps purely as
# scores.
# I'd like to do a proper conversion of URL-to-doctype,
			# but that would introduce a dependency on the
# distillers. It'd be fine, but I think I'd rather
# avoid it right now, so we'll leave it to be someone
# else's problem when it's read back for monitoring.
# Assume that the PING'd urls for each backend don't
# change over time, we can still do pattern
# recognition.
current_time = datetime.datetime.utcnow()
thirty_minutes = datetime.timedelta(minutes=30)
thirty_minutes_ago = current_time - thirty_minutes
current_timestamp = int(time.mktime(current_time.timetuple()))
thirty_minutes_ago_timestamp = int(time.mktime(thirty_minutes_ago.timetuple()))
dst_redis.zremrangebyscore('umad_backend_heartbeats', '-inf', thirty_minutes_ago_timestamp)
dst_redis.zadd('umad_backend_heartbeats', current_timestamp, "{0} {1}".format(request_url, current_timestamp) )
mention(u"Logged heartbeat ping for {0} at {1}".format(request_url, current_timestamp))
# On readback we simply do:
# dst_redis.zrangebyscore('umad_backend_heartbeats', thirty_minutes_ago_timestamp, '+inf')
# Split each result, keep the ones for which the first
# element is the backend we care about, then analyse
# the numbers.
return 0
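# Hedged readback sketch (an addition mirroring the comments above): pull the
# heartbeats seen in the last thirty minutes back out of the sorted set. The
# helper name is hypothetical; the zrangebyscore usage follows the notes above.
def recent_heartbeats(dst_redis):
    current_time = datetime.datetime.utcnow()
    cutoff = int(time.mktime((current_time - datetime.timedelta(minutes=30)).timetuple()))
    members = dst_redis.zrangebyscore('umad_backend_heartbeats', cutoff, '+inf')
    # each member is "<url> <timestamp>"; keep just the backend URLs
    return [m.split()[0] for m in members]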
if __name__ == "__main__":
sys.exit(main())
| null |
smykowski/smykowski.py
|
smykowski.py
|
py
| 6,287 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.stderr.write",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.flush",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.flush",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.getpid",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "redis.StrictRedis",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "redis.StrictRedis",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 161,
"usage_type": "call"
}
] |
19777276
|
import pytest
import numpy as np
from os import path
from astropy.io import fits
from astropy import wcs
from astropy.coordinates import SkyCoord
import astropy.units as u
from .utils_for_test import create_test_ffis
from ..make_cube import CubeFactory
from ..cube_cut import CutoutFactory
from ..exceptions import InvalidQueryError, InputWarning
def checkcutout(cutfile, pixcrd, world, csize, ecube, eps=1.e-7):
"""Check FITS cutout for correctness
Checks RA_OBJ/DEC_OBJ in primary header, and TIME, FLUX, and
FLUX_ERR in table.
Inputs:
cutfile Name of FITS cutout file
pixcrd [2] pixel coordinates for cutout center [cy,cx]
world [2] RA, Dec in degrees for cutout center
csize Integer size of cutout (probably should be odd)
ecube Simulated data cube
eps Maximum allowed distance offset in degrees
    Raises an AssertionError on failure; returns None on success.
"""
ix = int(pixcrd[1])
iy = int(pixcrd[0])
x1 = ix - csize//2
x2 = x1 + csize - 1
y1 = iy - csize//2
y2 = y1 + csize - 1
hdulist = fits.open(cutfile)
ra_obj = hdulist[0].header['RA_OBJ']
dec_obj = hdulist[0].header['DEC_OBJ']
pinput = SkyCoord(world[0], world[1], frame='icrs', unit='deg')
poutput = SkyCoord(ra_obj, dec_obj, frame='icrs', unit='deg')
dist = pinput.separation(poutput).degree
assert dist <= eps, "{} separation in primary header {} too large".format(cutfile, dist)
ntimes = ecube.shape[2]
tab = hdulist[1].data
assert len(tab) == ntimes, "{} expected {} entries, found {}".format(cutfile, ntimes, len(tab))
assert (tab['TIME'] == (np.arange(ntimes)+0.5)).all(), "{} some time values are incorrect".format(cutfile)
check1(tab['FLUX'], x1, x2, y1, y2, ecube[:, :, :, 0], 'FLUX', cutfile)
check1(tab['FLUX_ERR'], x1, x2, y1, y2, ecube[:, :, :, 1], 'FLUX_ERR', cutfile)
# Regression test for PR #6
assert hdulist[2].data.dtype.type == np.int32
return
def check1(flux, x1, x2, y1, y2, ecube, label, cutfile):
"""Test one of flux or error"""
cx = ecube.shape[0]
cy = ecube.shape[1]
if x1 < 0:
assert np.isnan(flux[:, :-x1, :]).all(), "{} {} x1 NaN failure".format(cutfile, label)
if y1 < 0:
assert np.isnan(flux[:, :, :-y1]).all(), "{} {} y1 NaN failure".format(cutfile, label)
if x2 >= cx:
assert np.isnan(flux[:, -(x2-cx+1):, :]).all(), "{} {} x2 NaN failure".format(cutfile, label)
if y2 >= cy:
assert np.isnan(flux[:, :, -(y2-cy+1):]).all(), "{} {} y2 NaN failure".format(cutfile, label)
x1c = max(x1, 0)
y1c = max(y1, 0)
x2c = min(x2, cx-1)
y2c = min(y2, cy-1)
scube = ecube[x1c:x2c, y1c:y2c, :]
sflux = np.moveaxis(flux[:, x1c-x1:x2c-x1, y1c-y1:y2c-y1], 0, -1)
assert (scube == sflux).all(), "{} {} comparison failure".format(cutfile, label)
return
def test_cube_cutout(tmpdir):
"""
Testing the cube cutout functionality.
"""
# Making the test cube
cube_maker = CubeFactory()
img_sz = 10
num_im = 100
ffi_files = create_test_ffis(img_sz, num_im, dir_name=tmpdir)
cube_file = cube_maker.make_cube(ffi_files, path.join(tmpdir, "test_cube.fits"), verbose=False)
# Read one of the input images to get the WCS
img_header = fits.getheader(ffi_files[0], 1)
cube_wcs = wcs.WCS(img_header)
# get pixel positions at edges and center of image
# somewhat cryptic one-liner to get the grid of points
    pval = np.array([0, img_sz//2, img_sz-1], dtype=float)
pixcrd = pval[np.transpose(np.reshape(np.mgrid[0:3, 0:3], (2, 9)))]
# add one more giant cutout that goes off all 4 edges
pixcrd = np.append(pixcrd, pixcrd[4].reshape(1, 2), axis=0)
# getting the world coordinates
world_coords = cube_wcs.all_pix2world(pixcrd, 0)
# Getting the cutouts
cutbasename = 'make_cube_cutout_{}.fits'
cutlist = [path.join(tmpdir, cutbasename.format(i)) for i in range(len(world_coords))]
csize = [img_sz//2]*len(world_coords)
csize[-1] = img_sz+5
for i, v in enumerate(world_coords):
coord = SkyCoord(v[0], v[1], frame='icrs', unit='deg')
CutoutFactory().cube_cut(cube_file, coord, csize[i], target_pixel_file=cutlist[i],
output_path=tmpdir, verbose=False)
# expected values for cube
ecube = np.zeros((img_sz, img_sz, num_im, 2))
plane = np.arange(img_sz*img_sz, dtype=np.float32).reshape((img_sz, img_sz))
for i in range(num_im):
ecube[:, :, i, 0] = -plane
ecube[:, :, i, 1] = plane
plane += img_sz*img_sz
# Doing the actual checking
for i, cutfile in enumerate(cutlist):
checkcutout(cutfile, pixcrd[i], world_coords[i], csize[i], ecube)
def test_cutout_extras(tmpdir):
# Making the test cube
cube_maker = CubeFactory()
img_sz = 10
num_im = 100
ffi_files = create_test_ffis(img_sz, num_im)
cube_file = cube_maker.make_cube(ffi_files, path.join(tmpdir, "test_cube.fits"), verbose=False)
# Making the cutout
myfactory = CutoutFactory()
coord = "256.88 6.38"
###########################
# Test _parse_table_info #
###########################
cutout_size = [5, 3]
out_file = myfactory.cube_cut(cube_file, coord, cutout_size,
output_path=path.join(tmpdir, "out_dir"), verbose=False)
assert "256.880000_6.380000_5x3_astrocut.fits" in out_file
assert isinstance(myfactory.cube_wcs, wcs.WCS)
ra, dec = myfactory.cube_wcs.wcs.crval
assert round(ra, 4) == 250.3497
assert round(dec, 4) == 2.2809
# checking on the center coordinate too
coord = SkyCoord(256.88, 6.38, frame='icrs', unit='deg')
assert myfactory.center_coord.separation(coord) == 0
############################
# Test _get_cutout_limits #
############################
xmin, xmax = myfactory.cutout_lims[0]
ymin, ymax = myfactory.cutout_lims[1]
assert (xmax-xmin) == cutout_size[0]
assert (ymax-ymin) == cutout_size[1]
cutout_size = [5*u.pixel, 7*u.pixel]
out_file = myfactory.cube_cut(cube_file, coord, cutout_size, verbose=False)
assert "256.880000_6.380000_5x7_astrocut.fits" in out_file
xmin, xmax = myfactory.cutout_lims[0]
ymin, ymax = myfactory.cutout_lims[1]
assert (xmax-xmin) == cutout_size[0].value
assert (ymax-ymin) == cutout_size[1].value
cutout_size = [3*u.arcmin, 5*u.arcmin]
out_file = myfactory.cube_cut(cube_file, coord, cutout_size, verbose=False)
assert "256.880000_6.380000_8x15_astrocut.fits" in out_file
xmin, xmax = myfactory.cutout_lims[0]
ymin, ymax = myfactory.cutout_lims[1]
assert (xmax-xmin) == 8
assert (ymax-ymin) == 15
cutout_size = [1*u.arcsec, 5*u.arcsec]
out_file = myfactory.cube_cut(cube_file, coord, cutout_size, verbose=False)
assert "256.880000_6.380000_1x1_astrocut.fits" in out_file
xmin, xmax = myfactory.cutout_lims[0]
ymin, ymax = myfactory.cutout_lims[1]
assert (xmax-xmin) == 1
assert (ymax-ymin) == 1
#############################
# Test _get_full_cutout_wcs #
#############################
cutout_size = [5, 3]
out_file = myfactory.cube_cut(cube_file, coord, cutout_size, verbose=False)
cutout_wcs_full = myfactory._get_full_cutout_wcs(fits.getheader(cube_file, 2))
assert (cutout_wcs_full.wcs.crpix == [1045 - myfactory.cutout_lims[0, 0],
1001 - myfactory.cutout_lims[1, 0]]).all()
########################
# Test _fit_cutout_wcs #
########################
max_dist, sigma = myfactory._fit_cutout_wcs(cutout_wcs_full, (3, 5))
assert max_dist.deg < 1e-05
assert sigma < 1e-05
cry, crx = myfactory.cutout_wcs.wcs.crpix
assert round(cry) == 3
assert round(crx) == 2
##########################
# Test target pixel file #
##########################
# Testing the cutout content is in test_cube_cutout
# this tests that the format of the tpf is what it should be
tpf = fits.open(out_file)
assert tpf[0].header["ORIGIN"] == 'STScI/MAST'
tpf_table = tpf[1].data
assert len(tpf_table.columns) == 12
assert "TIME" in tpf_table.columns.names
assert "FLUX" in tpf_table.columns.names
assert "FLUX_ERR" in tpf_table.columns.names
assert "FFI_FILE" in tpf_table.columns.names
cutout_img = tpf_table[0]['FLUX']
assert cutout_img.shape == (3, 5)
assert cutout_img.dtype.name == 'float32'
aperture = tpf[2].data
assert aperture.shape == (3, 5)
assert aperture.dtype.name == 'int32'
tpf.close()
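# Hedged aside (not part of the original suite): the arcmin/arcsec cases above
# pass because angular cutout sizes are converted to pixels via the WCS plate
# scale. A minimal sketch of that conversion, assuming a square pixel scale
# read from wcs.wcs.cdelt; astrocut's own rounding may differ.
def _angular_size_to_pixels(cube_wcs, size_quantity):
    pix_scale = abs(cube_wcs.wcs.cdelt[0]) * u.deg  # degrees per pixel (assumed square)
    return int(round((size_quantity / pix_scale).decompose().value))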
def test_exceptions(tmpdir):
"""
Testing various error conditions.
"""
# Making the test cube
cube_maker = CubeFactory()
img_sz = 10
num_im = 100
ffi_files = create_test_ffis(img_sz, num_im)
cube_file = cube_maker.make_cube(ffi_files, path.join(tmpdir, "test_cube.fits"), verbose=False)
# Setting up
myfactory = CutoutFactory()
hdu = fits.open(cube_file)
cube_table = hdu[2].data
# Testing when none of the FFIs have good wcs info
cube_table["WCSAXES"] = 0
with pytest.raises(Exception, match='No FFI rows contain valid WCS keywords.') as e:
myfactory._parse_table_info(cube_table)
assert e.type is wcs.NoWcsKeywordsFoundError
cube_table["WCSAXES"] = 2
# Testing when nans are present
myfactory._parse_table_info(cube_table)
wcs_orig = myfactory.cube_wcs
cube_table["BARYCORR"] = np.nan
myfactory._parse_table_info(cube_table)
assert wcs_orig.to_header_string() == myfactory.cube_wcs.to_header_string()
hdu.close()
# Testing various off the cube inputs
myfactory.center_coord = SkyCoord("50.91092264 6.40588255", unit='deg')
with pytest.raises(Exception, match='Cutout location is not in cube footprint!') as e:
myfactory._get_cutout_limits(np.array([5, 5]))
assert e.type is InvalidQueryError
myfactory.center_coord = SkyCoord("257.91092264 6.40588255", unit='deg')
with pytest.raises(Exception, match='Cutout location is not in cube footprint!') as e:
myfactory._get_cutout_limits(np.array([5, 5]))
assert e.type is InvalidQueryError
# Testing the WCS fitting function
distmax, sigma = myfactory._fit_cutout_wcs(myfactory.cube_wcs, (100, 100))
assert distmax.deg < 0.003
assert sigma < 0.03
distmax, sigma = myfactory._fit_cutout_wcs(myfactory.cube_wcs, (1, 100))
assert distmax.deg < 0.003
assert sigma < 0.03
distmax, sigma = myfactory._fit_cutout_wcs(myfactory.cube_wcs, (100, 2))
assert distmax.deg < 0.03
assert sigma < 0.03
myfactory.center_coord = SkyCoord("256.38994124 4.88986771", unit='deg')
myfactory._get_cutout_limits(np.array([5, 500]))
hdu = fits.open(cube_file)
cutout_wcs = myfactory._get_full_cutout_wcs(hdu[2].header)
hdu.close()
distmax, sigma = myfactory._fit_cutout_wcs(cutout_wcs, (200, 200))
assert distmax.deg < 0.004
assert sigma < 0.2
distmax, sigma = myfactory._fit_cutout_wcs(cutout_wcs, (100, 5))
assert distmax.deg < 0.003
assert sigma < 0.003
distmax, sigma = myfactory._fit_cutout_wcs(cutout_wcs, (3, 100))
assert distmax.deg < 0.003
assert sigma < 0.003
def test_inputs(tmpdir, capsys):
"""
Testing with different user input types/combos. And verbose.
"""
# Making the test cube
cube_maker = CubeFactory()
img_sz = 10
num_im = 100
ffi_files = create_test_ffis(img_sz, num_im)
cube_file = cube_maker.make_cube(ffi_files, path.join(tmpdir, "test_cube.fits"), verbose=False)
# Setting up
myfactory = CutoutFactory()
coord = "256.88 6.38"
cutout_size = [5, 3]*u.pixel
cutout_file = myfactory.cube_cut(cube_file, coord, cutout_size, output_path=tmpdir, verbose=True)
captured = capsys.readouterr()
assert "Image cutout cube shape: (100, 3, 5)" in captured.out
assert "Using WCS from row 50 out of 100" in captured.out
assert "Cutout center coordinate: 256.88,6.38" in captured.out
assert "5x3" in cutout_file
cutout_size = [5, 3]*u.arcmin
cutout_file = myfactory.cube_cut(cube_file, coord, cutout_size, output_path=tmpdir, verbose=False)
assert "14x9" in cutout_file
cutout_size = [5, 3, 9]*u.pixel
with pytest.warns(InputWarning):
cutout_file = myfactory.cube_cut(cube_file, coord, cutout_size, output_path=tmpdir, verbose=False)
assert "5x3" in cutout_file
assert "x9" not in cutout_file
| null |
astrocut/tests/test_cube_cut.py
|
test_cube_cut.py
|
py
| 12,764 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "astropy.io.fits.open",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "numpy.isnan",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.moveaxis",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "make_cube.CubeFactory",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "utils_for_test.create_test_ffis",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "astropy.io.fits.getheader",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "astropy.wcs.WCS",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "astropy.wcs",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "numpy.transpose",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.mgrid",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "numpy.append",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "cube_cut.CutoutFactory",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "make_cube.CubeFactory",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "utils_for_test.create_test_ffis",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "cube_cut.CutoutFactory",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "astropy.wcs.WCS",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "astropy.wcs",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "astropy.units.pixel",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "astropy.units.arcmin",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "astropy.units.arcsec",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "astropy.io.fits.getheader",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "make_cube.CubeFactory",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "utils_for_test.create_test_ffis",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "cube_cut.CutoutFactory",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "pytest.raises",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "astropy.wcs.NoWcsKeywordsFoundError",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "astropy.wcs",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "numpy.nan",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "exceptions.InvalidQueryError",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "exceptions.InvalidQueryError",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "make_cube.CubeFactory",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "utils_for_test.create_test_ffis",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "cube_cut.CutoutFactory",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "astropy.units.pixel",
"line_number": 361,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "astropy.units.arcmin",
"line_number": 369,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 369,
"usage_type": "name"
},
{
"api_name": "astropy.units.pixel",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "pytest.warns",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "exceptions.InputWarning",
"line_number": 375,
"usage_type": "argument"
}
] |
511985183
|
from threading import Thread
import numpy as np
import pandas as pd
from flask import Flask, jsonify, request
from clf import run, load_model
from flask_swagger_ui import get_swaggerui_blueprint
app = Flask(__name__)
### swagger specific ###
SWAGGER_URL = '/swagger'
API_URL = '/static/swagger.json'
SWAGGERUI_BLUEPRINT = get_swaggerui_blueprint(
SWAGGER_URL,
API_URL,
config={
'app_name': "Credit-Card-Fraud-Detection-ML"
}
)
app.register_blueprint(SWAGGERUI_BLUEPRINT, url_prefix=SWAGGER_URL)
### end swagger specific ###
@app.route("/")
def index():
return "welcome to news clf"
@app.route("/train")
def train_new_model():
trainer_job = Thread(target=run)
trainer_job.start()
return "training started !!"
@app.route("/load_model")
def load_new_model():
cur_model = load_model()
return "model updated !"
@app.route("/clf", methods=['POST']) # for test use postman and in test tab put this [{"data":""}]
def classify():
# load doc
if request.is_json:
print("Valid Json")
else:
        return jsonify({
            "Bad Request": "Request body is not valid JSON; please check the keys and values"
        })
data = request.json['data']
print(data)
print(type(data))
cur_model = load_model()
data = pd.DataFrame.from_dict(data, orient='index')
data = data.values.reshape(-1, 30)
pred = cur_model.predict(data)
conf = cur_model.predict_proba(data)
print(type(pred))
print(pred)
# Replace every 0 with Valid and every 1 with Fraud in prediction
mapping = {1: 'Fraud', 0: 'Valid'}
    label = lambda x: mapping.get(x, x)  # avoid shadowing the built-in map
    pred = np.vectorize(label)(pred)
print(pred)
# Converting the numpyArray to list so jsonify can convert it to json
pred = pred.tolist()
conf = conf.tolist()
return jsonify({
"transaction": pred[0],
"Confidence": {"Valid": conf[0][0], "Fraud": conf[0][1]}
})
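# Hedged client-side sketch (an addition): exercising /clf without Postman.
# The 30 zero-valued features and the URL are illustration-only placeholders.
def _example_client_call(base_url="http://localhost:5000"):
    import requests  # assumed available in the environment
    features = {str(i): 0.0 for i in range(30)}
    resp = requests.post(f"{base_url}/clf", json={"data": features})
    return resp.json()  # {"transaction": ..., "Confidence": {...}}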
| null |
routes.py
|
routes.py
|
py
| 1,950 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_swagger_ui.get_swaggerui_blueprint",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "clf.run",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "clf.load_model",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "flask.request.is_json",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "clf.load_model",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "numpy.vectorize",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 75,
"usage_type": "call"
}
] |
348820320
|
import json
from io import BytesIO
from unittest import mock
import boto3
import numpy as np
import pytest
from PIL import Image
from ..conftest import AWS_ENDPOINT_URI
from ..fixtures.payloads import umd_glad_alerts_payload, umd_tree_cover_loss_payload
@pytest.mark.e2e
@pytest.mark.parametrize("x, y, multiplier", [(0, 0, 1), (1, 1, 4)])
def test_dynamic_tiles_no_params(x, y, multiplier, client):
"""
Test dynamic tile cache
:param x: x block coordinate for tile
:param y: y block coordinate for tile
:param multiplier: the test tile has multiplier for each x, y block. This is used to check the expected result values.
"""
try:
response = client.get(
f"/wur_radd_alerts/v20201214/dynamic/14/{x}/{y}.png",
params={"implementation": "default"},
stream=True,
)
assert (
response.status_code == 200
), f"Bad response for request {response.request.url}: {response.json()}"
img = _response_to_img(response)
_check_png(img, multiplier)
# check if s3 file copied. It should now be accessible using the default endpoint.
saved_bytes = BytesIO()
s3_client = boto3.client("s3", endpoint_url=AWS_ENDPOINT_URI)
s3_client.download_fileobj(
"gfw-tiles-test",
f"wur_radd_alerts/v20201214/default/14/{x}/{y}.png",
saved_bytes,
)
saved_bytes.seek(0)
_check_png(saved_bytes, multiplier)
finally:
_get_logs()
@pytest.mark.e2e
@pytest.mark.parametrize("x, y, confirmed_only", [(0, 0, False), (0, 0, True)])
def test_dynamic_tiles_params(x, y, confirmed_only, client):
"""
Test dynamic tile cache end to end
"""
try:
response = client.get(
f"/wur_radd_alerts/v20201214/dynamic/14/{x}/{y}.png",
params={"confirmed_only": confirmed_only},
stream=True,
)
assert (
response.status_code == 200
), f"Bad response for request {response.request.url}: {response.json()}"
img = _response_to_img(response)
_check_filtered_png(img, confirmed_only)
# check if s3 file copied. It should now be accessible using the default endpoint.
# saved_bytes = BytesIO()
# s3_client = boto3.client("s3", endpoint_url=AWS_ENDPOINT_URI)
# s3_client.download_fileobj(
# "gfw-tiles-test",
# f"wur_radd_alerts/v20201214/default/14/{x}/{y}.png",
# saved_bytes,
# )
# saved_bytes.seek(0)
# _check_png(saved_bytes, multiplier)
finally:
_get_logs()
@pytest.mark.parametrize(
"params, payload",
[
umd_tree_cover_loss_payload(),
umd_glad_alerts_payload(),
umd_tree_cover_loss_payload(z=13),
umd_glad_alerts_payload(z=15),
],
)
def test_dynamic_tiles_named(params, payload, client, mock_get_dynamic_tile):
"""Only testing if payload is correctly forwarded to lambda.
Lambda execution should be handled by a separate test.
"""
dataset = payload["dataset"]
version = payload["version"]
x = payload["x"]
y = payload["y"]
z = payload["z"]
print(payload)
# This will mock the lambda function and return the payload
if dataset == "umd_tree_cover_loss":
mock_patch = f"app.routes.{dataset}.raster_tiles.get_cached_response"
else:
mock_patch = "app.routes.dynamic_deforestation_alerts_tile.get_cached_response"
with mock.patch(mock_patch) as mck:
mck.side_effect = mock_get_dynamic_tile
response = client.get(
f"/{dataset}/{version}/dynamic/{z}/{x}/{y}.png", params=params, stream=True
)
assert (
response.status_code == 200
), f"Bad response for request {response.request.url}: {response.json()}"
expected_response = {"data": payload, "status": "success"}
rsp = _response_to_img(response)
assert json.loads(rsp.read()) == expected_response
def _response_to_img(response):
response.raw.decode_content = True
image_bytes = BytesIO()
for chunk in response:
image_bytes.write(chunk)
image_bytes.seek(0)
return image_bytes
def _check_png(image_bytes, multiplier):
image = Image.open(image_bytes)
rgb = np.array(image)
assert rgb.shape == (256, 256, 3)
assert np.all(rgb[:, :, 0] == 1 * multiplier)
assert np.all(rgb[:, :, 1] == 2 * multiplier)
assert np.all(rgb[:, :, 2] == 3 * multiplier)
def _check_filtered_png(image_bytes, confirmed_only):
image = Image.open(image_bytes)
rgba = np.array(image)
assert rgba.shape == (256, 256, 4)
assert np.all(rgba[:, :, 0] == 228)
assert np.all(rgba[:, :, 1] == 102)
assert np.all(rgba[:, :, 2] == 153)
if confirmed_only:
assert np.all(rgba[:, :, 3] == 0)
else:
assert np.all(rgba[:, :, 3] == 150)
def _get_logs():
log_client = boto3.client(
"logs", region_name="us-east-1", endpoint_url=AWS_ENDPOINT_URI
)
log_group_name = "/aws/lambda/test_project-lambda-tiler"
for log_stream in log_client.describe_log_streams(logGroupName=log_group_name)[
"logStreams"
]:
log_events = log_client.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream["logStreamName"],
)["events"]
for event in log_events:
# for some reason stack traces come with carriage returns,
# which overwrites the line instead of making a new line
message = event["message"].replace("\r", "\n")
print(f"{log_stream['logStreamName']}: {message}")
| null |
tests/routes/test_raster_tiles.py
|
test_raster_tiles.py
|
py
| 5,700 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "io.BytesIO",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "conftest.AWS_ENDPOINT_URI",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "pytest.mark",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.patch",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "fixtures.payloads.umd_tree_cover_loss_payload",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "fixtures.payloads.umd_glad_alerts_payload",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "fixtures.payloads.umd_tree_cover_loss_payload",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "fixtures.payloads.umd_glad_alerts_payload",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "conftest.AWS_ENDPOINT_URI",
"line_number": 162,
"usage_type": "name"
}
] |
48830805
|
#!/usr/bin/env python
import os
import pytest
from gluon.contrib.webclient import WebClient
import copy
"""
These tests run based on webclient and need web2py server running.
"""
new_innovation_data = dict(name='This is an innovation generated from automated tests',
overview='This is an innovation generated from automated tests',
type="Managerial",
level_1="Crops",
level_2="All crops",
level_3="All crops",
enterprise_size="Small",
value_chain="Other",
location="This is an innovation generated from automated tests",
application_id="1",
ipr_protection_id="2",
impacts="This is an innovation generated from automated tests",
requirements="This is an innovation generated from automated tests",
innovator="This is an innovation generated from automated tests",
researcher="This is an innovation generated from automated tests",
contact="This is an innovation generated from automated tests",
keywords="This is an innovation generated from automated tests",
list_of_publications="This is an innovation generated from automated tests"
)
def test_index_exists(client):
client.get('/index')
assert client.status == 200
assert "PARI" in client.text
# # In case of widgets, I recommend you use re.findall() or re.search() to
# # see if your field was rendered using the right HTML tag.
# # Examples: <input type="text"...>, <input type="checkbox"...>,
# # <textarea...>, etc.
def test_validate_show_existing_in_db(client, web2py):
"""when accessing an existing in DB innovation we should show this innovation"""
pytest.skip("long test")
for row in web2py.db(web2py.db.innovation.id).select():
innovation_id = str(row.id)
client.get('/show/' + innovation_id)
assert client.status == 200
assert "Overview" in client.text
def test_validate_show_non_existing_innov(client, web2py):
"""when accessing a non-existing innovation we should get a redirect to home page"""
pytest.skip("long test")
assert isinstance(client, WebClient)
innovation_id = set(row.id for row in web2py.db(web2py.db.innovation.id).select())
non_existing_ids = set(list(range(1, max(innovation_id)))) - innovation_id
for id in list(non_existing_ids):
client.get('/show/' + str(id))
assert client.status == 200
assert "PARI" in client.text
assert client.history != list()
client.history = list()
def test_validate_new(client, web2py):
"""access a page that requires login"""
data = dict(email=os.environ['WEB2PY_LOGIN'],
password=os.environ['WEB2PY_PASSWORD'],
_formname='login')
client.post('/user/login', data=data)
client.get('/new')
assert client.status == 200
assert "Create New Innovation" in client.text
def test_save_new_innovation_should_create_new_innovation(client, web2py):
"""create new innovation"""
client.post('/new', data=new_innovation_data)
assert client.status == 200
assert "Download as PDF" in client.text
# assert web2py.db(web2py.db.people).count() == 1
# assert web2py.db(web2py.db.people.name == data['name']).count() == 1
def test_save_new_innovation_with_missing_field_should_fail(client, web2py):
"""
if one of the values is missing from data
that means we don't have enough info to submit new innovation
therefore we stay on the page /new
"""
for f in new_innovation_data.keys():
data = copy.deepcopy(new_innovation_data)
del data[f]
client.post('/new', data=data)
assert client.status == 200
assert "Create New Innovation" in client.text
#
# def test_get_person_by_creation_date(client, web2py):
# """Is my filter working?
# """
#
# from gluon.contrib.populate import populate
# populate(web2py.db.people, 3) # insert 3 persons with random data
# web2py.db.commit()
# assert web2py.db(web2py.db.people).count() == 3
#
# data = dict(
# name='John Smith',
# phone='3322-4455',
# created_at='1999-04-03 18:00:00')
#
# web2py.db.people.insert(**data) # insert my controlled person
# web2py.db.commit()
#
# client.get('/people/get_by_creation_date.json/' +
# data['created_at'].split()[0])
# assert client.status == 200
# assert 'application/json' in client.headers['content-type']
#
# import json
# person = json.loads(client.text)
# assert person['name'] == data['name']
| null |
tests/controllers/test_default_controller_webclient.py
|
test_default_controller_webclient.py
|
py
| 4,962 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pytest.skip",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pytest.skip",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "gluon.contrib.webclient.WebClient",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "os.environ",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 99,
"usage_type": "call"
}
] |
304140543
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from ctypes import c_double, c_int
from mpi4py import MPI
import numpy as np
import sys
from lammps import lammps
from pympipool import (
interface_connect,
interface_send,
interface_shutdown,
interface_receive,
)
__author__ = "Sarath Menon, Jan Janssen"
__copyright__ = (
"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Jan Janssen"
__email__ = "[email protected]"
__status__ = "production"
__date__ = "Feb 28, 2020"
# dict for extract atom methods
atom_properties = {
"x": {"type": 3, "gtype": 1, "dim": 3},
"mass": {"type": 2, "gtype": 1, "dim": 1},
"id": {"type": 0, "gtype": 0, "dim": 1},
"type": {"type": 0, "gtype": 0, "dim": 1},
"mask": {"type": 0, "gtype": 0, "dim": 1},
"v": {"type": 3, "gtype": 1, "dim": 3},
"f": {"type": 3, "gtype": 1, "dim": 3},
"molecule": {"type": 0, "gtype": 0, "dim": 1},
"q": {"type": 2, "gtype": 1, "dim": 1},
"mu": {"type": 3, "gtype": 1, "dim": 3},
"omega": {"type": 3, "gtype": 1, "dim": 3},
"angmom": {"type": 3, "gtype": 1, "dim": 3},
"torque": {"type": 3, "gtype": 1, "dim": 3},
"radius": {"type": 2, "gtype": 1, "dim": 1},
"image": {"type": 2, "gtype": 0, "dim": 3},
# we can add more quantities as needed
# taken directly from atom.cpp -> extract()
}
def extract_compute(job, funct_args):
def convert_data(val, type, length, width):
data = []
if type == 2:
for i in range(length):
dummy = []
for j in range(width):
dummy.append(val[i][j])
data.append(np.array(dummy))
data = np.array(data)
elif type == 1:
for i in range(length):
data.append(val[i])
data = np.array(data)
else:
data = val
return data
id = funct_args[0]
style = funct_args[1]
type = funct_args[2]
length = funct_args[3]
width = funct_args[4]
filtered_args = [id, style, type]
if style == 0:
val = job.extract_compute(*filtered_args)
return convert_data(val=val, type=type, length=length, width=width)
elif style == 1: # per atom property
val = _gather_data_from_all_processors(
data=job.numpy.extract_compute(*filtered_args)
)
if MPI.COMM_WORLD.rank == 0:
length = job.get_natoms()
return convert_data(val=val, type=type, length=length, width=width)
else: # Todo
raise ValueError("Local style is currently not supported")
def get_version(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.version()
def get_file(job, funct_args):
job.file(*funct_args)
return 1
def commands_list(job, funct_args):
job.commands_list(*funct_args)
return 1
def commands_string(job, funct_args):
job.commands_string(*funct_args)
return 1
def extract_setting(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.extract_setting(*funct_args)
def extract_global(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.extract_global(*funct_args)
def extract_box(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.extract_box(*funct_args)
def extract_atom(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
# extract atoms return an internal data type
# this has to be reformatted
name = str(funct_args[0])
        if name not in atom_properties:
return []
# this block prevents error when trying to access values
# that do not exist
try:
val = job.extract_atom(name, atom_properties[name]["type"])
except ValueError:
return []
# this is per atom quantity - so get
# number of atoms - first dimension
natoms = job.get_natoms()
# second dim is from dict
dim = atom_properties[name]["dim"]
data = []
if dim > 1:
for i in range(int(natoms)):
dummy = [val[i][x] for x in range(dim)]
data.append(dummy)
else:
data = [val[x] for x in range(int(natoms))]
return np.array(data)
def extract_fix(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.extract_fix(*funct_args)
def extract_variable(job, funct_args):
# in the args - if the third one,
# which is the type is 1 - a lammps array is returned
if funct_args[2] == 1:
data = _gather_data_from_all_processors(
data=job.numpy.extract_variable(*funct_args)
)
if MPI.COMM_WORLD.rank == 0:
return np.array(data)
else:
if MPI.COMM_WORLD.rank == 0:
# if type is 1 - reformat file
try:
data = job.extract_variable(*funct_args)
except ValueError:
return []
return data
def get_natoms(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.get_natoms()
def set_variable(job, funct_args):
return job.set_variable(*funct_args)
def reset_box(job, funct_args):
job.reset_box(*funct_args)
return 1
def gather_atoms(job, funct_args):
# extract atoms return an internal data type
# this has to be reformatted
name = str(funct_args[0])
if name not in atom_properties.keys():
return []
# this block prevents error when trying to access values
# that do not exist
try:
val = job.gather_atoms(
name, atom_properties[name]["gtype"], atom_properties[name]["dim"]
)
except ValueError:
return []
# this is per atom quantity - so get
# number of atoms - first dimension
val = list(val)
dim = atom_properties[name]["dim"]
if dim > 1:
data = [val[x : x + dim] for x in range(0, len(val), dim)]
else:
data = list(val)
return np.array(data)
def gather_atoms_concat(job, funct_args):
# extract atoms return an internal data type
# this has to be reformatted
name = str(funct_args[0])
    if name not in atom_properties:
return []
# this block prevents error when trying to access values
# that do not exist
try:
val = job.gather_atoms_concat(
name, atom_properties[name]["gtype"], atom_properties[name]["dim"]
)
except ValueError:
return []
# this is per atom quantity - so get
# number of atoms - first dimension
val = list(val)
dim = atom_properties[name]["dim"]
if dim > 1:
data = [val[x : x + dim] for x in range(0, len(val), dim)]
else:
data = list(val)
return np.array(data)
def gather_atoms_subset(job, funct_args):
# convert to ctypes
name = str(funct_args[0])
lenids = int(funct_args[1])
ids = funct_args[2]
# prep ids
cids = (lenids * c_int)()
for i in range(lenids):
cids[i] = ids[i]
    if name not in atom_properties:
return []
# this block prevents error when trying to access values
# that do not exist
try:
val = job.gather_atoms_subset(
name,
atom_properties[name]["gtype"],
atom_properties[name]["dim"],
lenids,
cids,
)
except ValueError:
return []
# this is per atom quantity - so get
# number of atoms - first dimension
val = list(val)
dim = atom_properties[name]["dim"]
if dim > 1:
data = [val[x : x + dim] for x in range(0, len(val), dim)]
else:
data = list(val)
return np.array(data)
def create_atoms(job, funct_args):
job.create_atoms(*funct_args)
return 1
def has_exceptions(job, funct_args):
return job.has_exceptions
def has_gzip_support(job, funct_args):
return job.has_gzip_support
def has_png_support(job, funct_args):
return job.has_png_support
def has_jpeg_support(job, funct_args):
return job.has_jpeg_support
def has_ffmpeg_support(job, funct_args):
return job.has_ffmpeg_support
def installed_packages(job, funct_args):
return job.installed_packages
def set_fix_external_callback(job, funct_args):
job.set_fix_external_callback(*funct_args)
return 1
def get_neighlist(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.get_neighlist(*funct_args)
def find_pair_neighlist(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.find_pair_neighlist(*funct_args)
def find_fix_neighlist(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.find_fix_neighlist(*funct_args)
def find_compute_neighlist(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.find_compute_neighlist(*funct_args)
def get_neighlist_size(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.get_neighlist_size(*funct_args)
def get_neighlist_element_neighbors(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return job.get_neighlist_element_neighbors(*funct_args)
def get_thermo(job, funct_args):
if MPI.COMM_WORLD.rank == 0:
return np.array(job.get_thermo(*funct_args))
def scatter_atoms(job, funct_args):
name = str(funct_args[0])
py_vector = funct_args[1]
# now see if its an integer or double type- but before flatten
py_vector = np.array(py_vector).flatten()
if atom_properties[name]["gtype"] == 0:
c_vector = (len(py_vector) * c_int)(*py_vector)
else:
c_vector = (len(py_vector) * c_double)(*py_vector)
job.scatter_atoms(
name, atom_properties[name]["gtype"], atom_properties[name]["dim"], c_vector
)
return 1
def scatter_atoms_subset(job, funct_args):
name = str(funct_args[0])
lenids = int(funct_args[2])
ids = funct_args[3]
# prep ids
cids = (lenids * c_int)()
for i in range(lenids):
cids[i] = ids[i]
py_vector = funct_args[1]
# now see if its an integer or double type- but before flatten
py_vector = np.array(py_vector).flatten()
if atom_properties[name]["gtype"] == 0:
c_vector = (len(py_vector) * c_int)(*py_vector)
else:
c_vector = (len(py_vector) * c_double)(*py_vector)
job.scatter_atoms_subset(
name,
atom_properties[name]["gtype"],
atom_properties[name]["dim"],
lenids,
cids,
c_vector,
)
return 1
def command(job, funct_args):
job.command(funct_args)
return 1
def select_cmd(argument):
"""
Select a lammps command
Args:
        argument (str): name of a registered function, e.g. extract_compute, get_thermo, scatter_atoms, command, gather_atoms
Returns:
function: the selected function
"""
switcher = {
f.__name__: f
for f in [
extract_compute,
get_version,
get_file,
commands_list,
commands_string,
extract_setting,
extract_global,
extract_box,
extract_atom,
extract_fix,
extract_variable,
get_natoms,
set_variable,
reset_box,
gather_atoms_concat,
gather_atoms_subset,
scatter_atoms_subset,
create_atoms,
has_exceptions,
has_gzip_support,
has_png_support,
has_jpeg_support,
has_ffmpeg_support,
installed_packages,
set_fix_external_callback,
get_neighlist,
find_pair_neighlist,
find_fix_neighlist,
find_compute_neighlist,
get_neighlist_size,
get_neighlist_element_neighbors,
get_thermo,
scatter_atoms,
command,
gather_atoms,
]
}
return switcher.get(argument)
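# Hedged usage sketch (added, not part of the original module): how the main
# loop below routes a received command through the dispatcher; the command
# string must match one of the function names registered in the switcher.
def _example_dispatch(job):
    funct = select_cmd("get_natoms")  # returns the get_natoms function above
    return funct(job=job, funct_args=[])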
def _gather_data_from_all_processors(data):
data_gather = MPI.COMM_WORLD.gather(data, root=0)
if MPI.COMM_WORLD.rank == 0:
data = []
for vl in data_gather:
for v in vl:
data.append(v)
return data
def _run_lammps_mpi(argument_lst):
index_selected = argument_lst.index("--zmqport")
port_selected = argument_lst[index_selected + 1]
if "--host" in argument_lst:
index_selected = argument_lst.index("--host")
host = argument_lst[index_selected + 1]
else:
host = "localhost"
argument_red_lst = argument_lst[:index_selected]
if MPI.COMM_WORLD.rank == 0:
context, socket = interface_connect(host=host, port=port_selected)
else:
context, socket = None, None
    # LAMMPS command-line arguments (screen output suppressed)
args = ["-screen", "none"]
if len(argument_red_lst) > 1:
args.extend(argument_red_lst[1:])
job = lammps(cmdargs=args)
while True:
if MPI.COMM_WORLD.rank == 0:
input_dict = interface_receive(socket=socket)
else:
input_dict = None
input_dict = MPI.COMM_WORLD.bcast(input_dict, root=0)
if "shutdown" in input_dict.keys() and input_dict["shutdown"]:
job.close()
if MPI.COMM_WORLD.rank == 0:
interface_send(socket=socket, result_dict={"result": True})
interface_shutdown(socket=socket, context=context)
break
output = select_cmd(input_dict["command"])(
job=job, funct_args=input_dict["args"]
)
if MPI.COMM_WORLD.rank == 0 and output is not None:
interface_send(socket=socket, result_dict={"result": output})
if __name__ == "__main__":
_run_lammps_mpi(argument_lst=sys.argv)
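# Example invocation (hypothetical port/host; note the parsing above expects --host,
# if given, to appear before --zmqport, with any LAMMPS arguments first):
#   mpirun -n 4 python lmpmpi.py --host localhost --zmqport 5555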
| null |
pylammpsmpi/mpi/lmpmpi.py
|
lmpmpi.py
|
py
| 13,911 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "ctypes.c_int",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 331,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 346,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "ctypes.c_int",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "ctypes.c_double",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "ctypes.c_int",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "ctypes.c_int",
"line_number": 382,
"usage_type": "name"
},
{
"api_name": "ctypes.c_double",
"line_number": 384,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD.gather",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 456,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 457,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 457,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 474,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 474,
"usage_type": "name"
},
{
"api_name": "pympipool.interface_connect",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "lammps.lammps",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 484,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 484,
"usage_type": "name"
},
{
"api_name": "pympipool.interface_receive",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD.bcast",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 488,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 488,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 491,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 491,
"usage_type": "name"
},
{
"api_name": "pympipool.interface_send",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "pympipool.interface_shutdown",
"line_number": 493,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 498,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 498,
"usage_type": "name"
},
{
"api_name": "pympipool.interface_send",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 503,
"usage_type": "attribute"
}
] |
227304193
|
import os
from unittest import mock
import pytest
from django.conf import settings
from django.test import TestCase
from metaci.build.management.commands.run_build import Command
from metaci.build.models import Build
from metaci.conftest import (
BranchFactory,
OrgFactory,
PlanFactory,
PlanRepositoryFactory,
RepositoryFactory,
UserFactory,
)
BUILD_TIMEOUT = 100
@mock.patch("metaci.build.management.commands.run_build.scratch_org_limits")
@mock.patch("metaci.build.management.commands.run_build.run_build")
@pytest.mark.django_db
class TestRunBuild(TestCase):
def test_run_build_persistent(self, run_build, scratch_org_limits):
self._testrun_build(run_build, scratch_org_limits, False, False)
def test_run_build_scratch(self, run_build, scratch_org_limits):
self._testrun_build(run_build, scratch_org_limits, True, False)
def _testrun_build(self, run_build, scratch_org_limits, org_is_scratch, no_lock):
repo = RepositoryFactory(name="myrepo")
branch = BranchFactory(name="mybranch", repo=repo)
plan = PlanFactory(name="myplan", org="myorg", build_timeout=BUILD_TIMEOUT)
PlanRepositoryFactory(repo=repo, plan=plan)
user = UserFactory(username="username")
org = OrgFactory(name="myorg", repo=repo, scratch=org_is_scratch)
os.environ["DYNO"] = "worker.42"
build_pk = None
def side_effect(build_id):
nonlocal build_pk
build_pk = build_id
run_build.side_effect = side_effect
scratch_org_limits.return_value = mock.Mock()
scratch_org_limits.return_value.remaining = settings.SCRATCH_ORG_RESERVE + 5
c = Command()
c.handle("myrepo", "mybranch", "commit", "myplan", "username", no_lock=no_lock)
assert build_pk
build = Build.objects.get(pk=build_pk)
assert not build.task_id_check # wasn't queued
assert build.build_type == "manual-command"
assert build.user == user
assert build.repo == repo
assert build.branch == branch
assert build.plan == plan
assert build.org == org
@mock.patch("metaci.build.management.commands.run_build.lock_org")
def test_run_build_sets_lock(self, lock_org, run_build, scratch_org_limits):
self._testrun_build(run_build, scratch_org_limits, False, False)
        assert lock_org.mock_calls[0][1][2] == BUILD_TIMEOUT
@mock.patch("metaci.build.management.commands.run_build.lock_org")
def test_run_build_can_skip_lock(self, lock_org, run_build, scratch_org_limits):
self._testrun_build(run_build, scratch_org_limits, False, True)
assert not lock_org.mock_calls
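# Run sketch (assumed, using the pytest runner these tests are written for):
#   pytest metaci/build/management/commands/tests/test_run_build.py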
| null |
metaci/build/management/commands/tests/test_run_build.py
|
test_run_build.py
|
py
| 2,683 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.test.TestCase",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "metaci.conftest.RepositoryFactory",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "metaci.conftest.BranchFactory",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "metaci.conftest.PlanFactory",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "metaci.conftest.PlanRepositoryFactory",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "metaci.conftest.UserFactory",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "metaci.conftest.OrgFactory",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.SCRATCH_ORG_RESERVE",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "metaci.build.management.commands.run_build.Command",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "metaci.build.models.Build.objects.get",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "metaci.build.models.Build.objects",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "metaci.build.models.Build",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pytest.mark",
"line_number": 24,
"usage_type": "attribute"
}
] |
117448425
|
# Name: Renacin Matadeen
# Date: 04/20/2020
# Title Open CV Research
#
# ----------------------------------------------------------------------------------------------------------------------
import cv2
# ----------------------------------------------------------------------------------------------------------------------
"""
Notes:
+ Resizing Will Destroy Detail, But Can Help With Processing Speed
- Be careful when resizing images, 1/2x Smaller, 2x Bigger
"""
def process_image(image):
    # Convert to grayscale | Resize image, making a 1/2-size version
    scale_factor = 2
    im_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    im_small = cv2.resize(im_gray, (0, 0), fx=1/scale_factor, fy=1/scale_factor)
    # Remove noise from image | Kernel size must be an odd number >= 3 so it has a centre origin point
blur_kern_size = 3
img_blurred = cv2.GaussianBlur(im_small,(blur_kern_size, blur_kern_size), 0)
# Calculate Edge Detection
img_edge = cv2.Laplacian(img_blurred, cv2.CV_64F)
# Resize Image To Original Dimensions
processed_image = cv2.resize(img_edge, (0,0), fx= scale_factor, fy= scale_factor)
# Return Image
return processed_image
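# Minimal usage sketch (assumed synthetic input so it runs without a file on disk):
#   import numpy as np
#   dummy = np.zeros((64, 64, 3), dtype=np.uint8)
#   edges = process_image(dummy)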
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
# Choose Image To Work On
in_path = r"C:\Users\renac\Documents\Programming\Python\CanadianCensus_DataCleaning\Research\Images\Image.jpg"
im = cv2.imread(in_path)
# Process Image
proc_img = process_image(im)
# Write Image
cv2.imwrite(r"C:\Users\renac\Documents\Programming\Python\CanadianCensus_DataCleaning\Research\Images\Processed_Image.jpg", proc_img)
| null |
Misc/OpenCv_Research.py
|
OpenCv_Research.py
|
py
| 1,952 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.cvtColor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.Laplacian",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.CV_64F",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 46,
"usage_type": "call"
}
] |
442793660
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.urls import reverse
from . models import *
from django.template.loader import render_to_string
from json import loads, dumps
from django.core.paginator import Paginator
from django.db.models import F, Value, Q
from .forms import ShippingAddressForm, RegisterForm, LoginForm, CustomUserCreationForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
# Create your views here.
def index(request):
render_context = {}
topbar = TopBar.objects.filter(
Active=True).prefetch_related('Images').first()
topbar_html = render_to_string(
'store/components/Topbar/topbar.html', {'topbar_images': topbar.Images.all()})
slider = Slider.objects.filter(Active=True).prefetch_related(
'Slide_pc', 'Slide_mob').first()
slider_html = render_to_string('store/components/slider/slider.html', {'slides_pc': slider.Slide_pc.all(), 'slides_mob': slider.Slide_mob.all(
), 'sliderCount_pc': range(0, slider.SliderCount_pc), 'sliderCount_mob': range(0, slider.SliderCount_mob)})
collection = Collection.objects.get(pk=1)
collection_html = render_to_string(
'store/components/collection/collection.html', {'collection': collection})
categories = Categorey.objects.all()
# categorey_html = render_to_string('store/components/section/section.html',{'categorey':categorey.All_products.all(),'categorey_name':categorey.Name})
render_context['topbar'] = topbar_html
render_context['slider'] = slider_html
render_context['collection'] = collection_html
render_context['categories'] = categories
render_context['brands'] = Brand.objects.all()
return render(request, 'store/index.html', render_context)
def product_view(request, ptype, p_id, v_id=None):
render_context = {}
if request.method == 'POST':
print('POST Request')
else:
if 'query' in request.GET:
last_variant = None
all_variants = Variant.objects.filter(Product__id=p_id)
for param in request.GET:
if param != 'query':
if last_variant == None:
a = all_variants.filter(Attributevalue__Attribute__Name__contains=param.lower(
)).filter(Attributevalue__Value__contains=request.GET[param].lower())
last_variant = a
else:
a = last_variant.filter(Attributevalue__Attribute__Name__contains=param.lower(
)).filter(Attributevalue__Value__contains=request.GET[param].lower())
last_variant = a
# print(last_variant[0].Name)
# print(len(last_variant))
if last_variant.exists():
return HttpResponseRedirect(reverse('product_view', kwargs={'ptype': 'variable', 'p_id': p_id, 'v_id': last_variant[0].pk}))
else:
                messages.warning(
                    request, "Sorry! We currently don't have that variant")
return HttpResponseRedirect(request.path)
else:
# if normal product
if ptype == 'normal':
query_product = Product.objects.filter(
pk=p_id).prefetch_related('Its_images')
render_context['product'] = query_product[0]
render_context['ptype'] = 'normal'
render_context['attributes_detail'] = None
# CHECK IF PRODUCT EXISTS IN CART IF AUTHENTICATED USER
if request.user.is_authenticated:
if request.user.Customer.Cart.Cartproduct.filter(Product_id=p_id).exists():
render_context['In_cart'] = True
else:
render_context['In_cart'] = False
else:
render_context['In_cart'] = False
return render(request, 'store/components/view/product/productView.html', render_context)
# if product with Variant
elif ptype == 'variable':
query_product = ProductWithVariant.objects.filter(
pk=p_id).prefetch_related('Variants__Attributevalue__Attribute').first()
# Prepare Attribute Details
render_context['attributes_detail'] = {}
allVars = query_product.Variants.all()
for variant in allVars:
variant_id = variant.id
for attribute_val in variant.Attributevalue.all():
attr = attribute_val.Attribute.Name
val = attribute_val.Value
if attr in render_context['attributes_detail'].keys():
if val not in render_context['attributes_detail'][attr]:
render_context['attributes_detail'][attr].append(
val)
else:
render_context['attributes_detail'][attr] = [val]
# render_context['attributes_detail'][attr].append(val)
render_context['product'] = query_product
render_context['variant'] = query_product.Variants.filter(
pk=v_id).prefetch_related('Images').first()
render_context['ptype'] = 'variable'
render_context['variant_vals'] = []
for attrValue in render_context['variant'].Attributevalue.all():
render_context['variant_vals'].append(attrValue.Value)
# CHECK IF VARIANT EXISTS IN CART IF AUTHENTICATED USER
if request.user.is_authenticated:
if request.user.Customer.Cart.Cartvariant.filter(Variant_id=v_id).exists():
render_context['In_cart'] = True
else:
render_context['In_cart'] = False
else:
render_context['In_cart'] = False
# print(f'---------------{render_context["In_cart"]}-------------')
# USE DJANGO SIGNAL TO CACHE THIS ATTRIBUTE DETAILS
# print(render_context['attributes_detail'])
return render(request, 'store/components/view/product/productView.html', render_context)
def get_meta_data(request):
    if request.method == 'POST':
        received_data = loads(request.body)
        print(received_data)
        if received_data['page'] in ('product', 'productwithvariant'):
            if received_data['for'] == 'add':
                questions = Product_type.objects.get(
                    pk=received_data['pk']).Meta_data
                return HttpResponse(dumps({'list': questions}))
            else:
                productMetaData = Product.objects.get(
                    pk=received_data['pk']).Meta_data
                return HttpResponse(dumps(productMetaData))
        elif received_data['page'] == 'product_type':
            questions = Product_type.objects.get(
                pk=received_data['pk']).Meta_data
            return HttpResponse(dumps({'list': questions}))
def handle_product_search(request):
if request.method == 'GET':
render_context = {}
if 'productquery' in request.GET and request.GET['productquery'] != '':
# first check normal product match
print('Searching Name')
productquery = request.GET['productquery']
all_normal_prods = Product.objects.filter(
Name__icontains=productquery)
all_variants = Variant.objects.filter(Name__icontains=productquery)
if all_normal_prods.exists() or all_variants.exists():
print('Name Exists')
normal_prod_detail = all_normal_prods.values(
'Name', 'id', 'Cost_to_customer', 'Crossed_price', 'Thumb_path', 'Highlights', 'Discount', 'Categorey_id', 'Is_normal_product')
variable_prod_details = all_variants.values(
'Name', 'id', 'Cost_to_customer', 'Crossed_price', 'Thumb_path', 'Highlights', 'Discount', 'Product_id', 'Is_normal_product')
final_ordered_qs = variable_prod_details.union(
normal_prod_detail, all=True).order_by('Cost_to_customer')
# print(normal_prod_detail.union(variable_prod_details,all=True))
# print(all_normal_prods.values('id','Categorey_id','Is_normal_product').union(all_variants.values('id','Product_id','Is_normal_product')))
try:
page_no = request.GET['page']
                except KeyError:
page_no = 1
try:
perpage = request.GET['perpage']
                except KeyError:
perpage = 2
paginator = Paginator(final_ordered_qs, perpage, orphans=1)
render_context['product_data'] = paginator.get_page(page_no)
                # only necessary for text search (when searched through the nav input)
                # only to let me know if there was any search result at all
                render_context['msg'] = True
                # only necessary for text search (when searched through the nav input)
render_context['result_for'] = 'Name'
render_context['productquery'] = productquery
return render(request, 'store/components/view/search/searchView.html', render_context)
else:
# No Match in Name
# now match tags
print('Searching Tags')
all_tagged_normal_products = Product.objects.filter(Tags__Name__icontains=productquery).distinct().values(
'Name', 'id', 'Cost_to_customer', 'Crossed_price', 'Thumb_path', 'Highlights', 'Discount', 'Categorey_id', 'Is_normal_product')
all_tagged_variable_products = ProductWithVariant.objects.values('Variants').filter(
Tags__Name__icontains=productquery).distinct().prefetch_related('Variants')
prod_vars = None
for product in all_tagged_variable_products:
p_vars = product.Variants.all().values('Name', 'id', 'Cost_to_customer', 'Crossed_price',
'Thumb_path', 'Highlights', 'Discount', 'Product_id', 'Is_normal_product')
                    if prod_vars is not None:
                        prod_vars = p_vars.union(prod_vars)
                    else:
                        prod_vars = p_vars
                if prod_vars is not None:
                    prod_vars = prod_vars.union(all_tagged_normal_products).order_by(
                        'Cost_to_customer')
if all_tagged_normal_products.exists():
print('Tags Exists')
render_context['product_data'] = all_tagged_normal_products
render_context['msg'] = True
render_context['result_for'] = 'Tags'
return render(request, 'store/components/view/search/searchView.html', render_context)
# return Render
else:
# No Match in Name and Tags
# search Description
print('Searching Description')
all_tagged_normal_products = Product.objects.filter(Description__icontains=productquery).values(
'Name', 'id', 'Cost_to_customer', 'Crossed_price', 'Thumb_path', 'Highlights', 'Discount', 'Categorey_id', 'Is_normal_product')
all_tagged_variable_products = ProductWithVariant.objects.filter(
Description__icontains=productquery).prefetch_related('Variants')
prod_vars = None
for product in all_tagged_variable_products:
p_vars = product.Variants.all().values('Name', 'id', 'Cost_to_customer', 'Crossed_price',
'Thumb_path', 'Highlights', 'Discount', 'Product_id', 'Is_normal_product')
                        if prod_vars is not None:
                            prod_vars = p_vars.union(prod_vars)
                        else:
                            prod_vars = p_vars
                    if prod_vars is not None:
                        prod_vars = prod_vars.union(all_tagged_normal_products).order_by(
                            'Cost_to_customer')
if all_tagged_normal_products.exists():
print('Description Exists')
render_context['product_data'] = all_tagged_normal_products
render_context['variant_details'] = prod_vars
render_context['msg'] = True
render_context['result_for'] = 'Description'
return render(request, 'store/components/view/search/searchView.html', render_context)
else:
print('Final No result')
render_context['msg'] = False
render_context['result_for'] = None
return render(request, 'store/components/view/search/searchView.html', render_context)
elif request.GET['for'] == 'brandfilter':
print(f'--------ID: {request.GET["querybrandId"]}---------')
brandId = request.GET["querybrandId"]
# BELOW TWO ARE FOR TESTING PURPOSES
# all_normal_prods = Product.objects.filter(Brand_id=request.GET['querybrandId']).values('Is_normal_product','id')
# all_variants = Variant.objects.filter(Product__Brand_id=request.GET['querybrandId']).select_related('Product').values('Is_normal_product','id')
all_normal_prods = Product.objects.filter(Brand_id=request.GET['querybrandId']).values(
'Is_normal_product', 'id', 'Name', 'Cost_to_customer', 'Crossed_price', 'Thumb_path', 'Highlights', 'Discount', 'Categorey_id', 'Meta_data')
all_variants = Variant.objects.filter(Product__Brand_id=request.GET['querybrandId']).select_related('Product').values(
'Is_normal_product', 'id', 'Name', 'Cost_to_customer', 'Crossed_price', 'Thumb_path', 'Highlights', 'Discount', 'Product_id', 'AttrCache')
if all_normal_prods.exists() and all_variants.exists():
                # render_context['product_data'] contains
# all Items (product+variant) details
render_context['product_data'] = all_variants.union(
all_normal_prods, all=True)
else:
if not all_normal_prods.exists() and not all_variants.exists():
# Just any Empty Queryset
render_context['product_data'] = all_normal_prods
elif not all_normal_prods.exists() and all_variants.exists():
render_context['product_data'] = all_variants
elif all_normal_prods.exists() and not all_variants.exists():
render_context['product_data'] = all_normal_prods
print(render_context['product_data'])
render_context['msg'] = True
render_context['result_for'] = 'brandfilter'
# print(dfd)
return render(request, 'store/components/view/search/searchView.html', render_context)
else:
print('No Valid query!')
render_context['msg'] = False
render_context['result_for'] = None
return render(request, 'store/components/view/search/searchView.html', render_context)
else:
print('Error')
        return HttpResponse('<h1>Forbidden</h1>')
def selected_variant(request, p_id, v_id):
pass
def handleCart(request):
if request.method == 'POST':
print(request.POST)
def addItemToCart(request):
    if request.method == 'POST':
        receivedData = loads(request.body)
        print(receivedData)
        if receivedData['ptype'] == 'variable':
            # Implies we have to add a Variant to the cart
            # add items in variant
            if request.user.Customer.Cart.Cartvariant.all().filter(Variant_id=receivedData['variantId']).count() == 0:
                from datetime import datetime
                CartVariant.objects.create(
                    Variant=Variant.objects.get(pk=receivedData['variantId']),
                    Quantity=receivedData['quantity'],
                    Added=datetime.fromtimestamp(int(receivedData['time'])),
                    Cart=request.user.Customer.Cart
                )
                return HttpResponse('added', status=200)
            else:
                print('duplicate')
                return HttpResponse('duplicate')
        elif receivedData['ptype'] == 'normal':
            if Cart.objects.get(pk=request.user.Customer.Cart_id).Cartproduct.all().filter(Product_id=receivedData['productId']).count() == 0:
                from datetime import datetime
                CartProduct.objects.create(
                    Product=Product.objects.get(pk=receivedData['productId']),
                    Quantity=receivedData['quantity'],
                    Cart=request.user.Customer.Cart,
                    Added=datetime.fromtimestamp(int(receivedData['time']))
                )
                return HttpResponse('added', status=200)
            else:
                print('duplicate')
                return HttpResponse('duplicate')
@login_required
def viewCart(request):
if request.method == "POST":
data = loads(request.body)
print(data)
if data['ptype'] == 'normal':
CartProduct.objects.filter(id=data['itemId']).delete()
else:
CartVariant.objects.filter(id=data['itemId']).delete()
render_context = {}
cartId = request.user.Customer.Cart_id
# VERSION 1
# cart = Cart.objects.prefetch_related('Cartproduct__Product','Cartvariant__Variant').get(pk = request.user.Customer.Cart_id)
# allCartproducts = CartProduct.objects.filter(Cart_id = cart.id).select_related('Product')
# allCartvariants = CartVariant.objects.filter(Cart_id = cart.id).select_related('Variant')
# VERSION 2
# allCartproducts = cart.Cartproduct.all().order_by('Added')
# allCartvariants = cart.Cartvariant.all().order_by('Added')
# VERSION 3
    # annotation is required because AttrCache does not exist on Product,
    # therefore we alias it through the "attributes" annotation so the union columns line up
allCartproducts = CartProduct.objects.annotate(attributes=F('Product__Meta_data')).filter(Cart_id=cartId).select_related('Product').values(
'id', 'Product__Is_variant', 'Product__Name', 'Product__Thumb_path', 'Product__Crossed_price', 'Product__Cost_to_customer', 'Quantity', 'Product__Discount', 'attributes', 'Added', 'Product_id', 'Product__id')
allCartvariants = CartVariant.objects.annotate(attributes=F('Variant__AttrCache')).filter(Cart_id=cartId).select_related('Variant').values(
'id', 'Variant__Is_variant', 'Variant__Name', 'Variant__Thumb_path', 'Variant__Crossed_price', 'Variant__Cost_to_customer', 'Quantity', 'Variant__Discount', 'attributes', 'Added', 'Variant__id', 'Variant__Product_id')
# allCartProd = CartProduct.objects.Product.all()
# for product in allCartProd:
# product
# allCartVar = CartVariant.objects.Variant.all()
# TESTING VERSION
# allCartproducts = CartProduct.objects.annotate(ptype='product').filter(Cart_id = cartId).select_related('Products').values('Product__Is_variant','Added')
# allCartvariants = CartVariant.objects.annotate(ptype='variant').filter(Cart_id = cartId).select_related('Variant').values('Variant__Is_variant','Added')
# allProds = []
# allVars = []
# for cartprod in allCartproducts:
# product = cartprod.Product
# allProds.append(product)
# for cartvar in allCartvariants:
# variant = cartvar.Variant
# allVars.append(variant)
# allItems = allCartproducts.union(allCartvariants,all=True).order_by('Added')
render_context['allItems'] = allCartvariants.union(
allCartproducts, all=True).order_by('-Added')
try:
render_context['total_amt_customer'] = sum(
[Item['Variant__Cost_to_customer'] for Item in render_context['allItems']])
render_context['total_amt_crossed'] = sum(
[Item['Variant__Crossed_price'] for Item in render_context['allItems']])
render_context['saved_amt'] = render_context['total_amt_crossed'] - \
render_context['total_amt_customer']
render_context['saved_amt_percent'] = round(
((render_context['saved_amt'])/render_context['total_amt_crossed'])*100)
    except ZeroDivisionError:
        render_context['total_amt_customer'] = 0
        render_context['total_amt_crossed'] = 0
        render_context['saved_amt'] = 0
        render_context['saved_amt_percent'] = 0
render_context['numItems'] = render_context['allItems'].count()
# print(allItems)
return render(request, 'store/components/view/cart/baseCart.html', render_context)
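# Sketch of the annotate-to-align-union trick used in viewCart above (column list
# shortened for illustration): annotating both querysets with a common alias makes
# .values() produce matching columns so union() can combine them.
#   prods = CartProduct.objects.annotate(attributes=F('Product__Meta_data')).values('id', 'attributes')
#   variants = CartVariant.objects.annotate(attributes=F('Variant__AttrCache')).values('id', 'attributes')
#   all_items = variants.union(prods, all=True)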
@login_required
def orderpage(request):
if request.user.is_authenticated:
cartId = request.user.Customer.Cart_id
print(cartId)
allCartprods = CartProduct.objects.filter(Cart_id=cartId).exists()
allCartvars = CartVariant.objects.filter(Cart_id=cartId).exists()
if allCartprods or allCartvars:
# proceed as normal, start address selection process
render_context = {}
allCartproducts = CartProduct.objects.filter(Cart_id=cartId).select_related(
'Product').values('Product__Crossed_price', 'Product__Cost_to_customer')
allCartvariants = CartVariant.objects.filter(Cart_id=cartId).select_related(
'Variant').values('Variant__Crossed_price', 'Variant__Cost_to_customer')
render_context['allItems'] = allCartvariants.union(
allCartproducts, all=True)
render_context['total_amt_customer'] = sum(
[Item['Variant__Cost_to_customer'] for Item in render_context['allItems']])
render_context['total_amt_crossed'] = sum(
[Item['Variant__Crossed_price'] for Item in render_context['allItems']])
render_context['saved_amt'] = render_context['total_amt_crossed'] - \
render_context['total_amt_customer']
render_context['saved_amt_percent'] = round(
((render_context['saved_amt'])/render_context['total_amt_crossed'])*100)
else:
render_context = {}
render_context['allItems'] = []
render_context['total_amt_customer'] = 0
            render_context['total_amt_crossed'] = 0
            render_context['saved_amt'] = 0
            render_context['saved_amt_percent'] = 0
render_context['numItems'] = 0
if request.method == 'POST':
form = ShippingAddressForm(request.POST)
if form.is_valid():
savedInstance = form.save(commit=False)
savedInstance.Owner = request.user.Customer
savedInstance.save()
# MORE LOGIC WILL BE WRITTEN
return HttpResponse('SUCCESS')
else:
render_context['form'] = form
return render(request, 'store/components/order/orderPage.html', render_context)
else:
render_context['form'] = ShippingAddressForm()
return render(request, 'store/components/order/orderPage.html', render_context)
else:
# redirect to the login page
        return HttpResponse('<h1>Forbidden Page, Not Logged In</h1>')
def asyncsearch(request):
if request.method == 'POST':
allVariants = Variant.objects.all().values_list('id', 'Name')
varList = []
for variant in allVariants:
varList.append(
{'id': variant[0], 'name': variant[1], 'ptype': 'variable'})
return JsonResponse({"matchedItems": varList})
else:
allVariants = Variant.objects.all().values_list('id', 'Name', 'Product_id')
varList = []
for variant in allVariants:
varList.append(
{'v_id': variant[0], 'name': f'{variant[1]}....', 'p_id': variant[2], 'ptype': 'variable'})
return JsonResponse({"matchedItems": varList})
@login_required()
def logout_view(request):
    from django.contrib.auth import logout
    logout(request)
return HttpResponseRedirect(reverse("customer_login"))
def handle_login(request):
if request.method == 'POST':
formData = LoginForm(request.POST)
if formData.is_valid():
from django.contrib.auth import login, authenticate
user = authenticate(
username=formData.cleaned_data['Username'], password=formData.cleaned_data.get('Password'))
if user != None:
login(request, user)
messages.success(request, 'Login Successful')
return HttpResponseRedirect(reverse('viewcart'))
else:
render_context = {}
render_context['form'] = LoginForm()
messages.error(request, 'Invalid Details!')
return render(request, 'store/components/user/login.html', render_context)
else:
render_context = {}
render_context['form'] = LoginForm()
return render(request, 'store/components/user/login.html', render_context)
def handle_register(request):
if request.method == 'POST':
formData = CustomUserCreationForm(request.POST)
if formData.is_valid():
new_user = formData.save()
new_user.refresh_from_db()
new_user.Customer.PhoneNumber = formData.cleaned_data.get(
'PhoneNumber')
new_user.save()
from django.contrib.auth import login, authenticate
user = authenticate(
username=formData.cleaned_data['username'], password=formData.clean_password2())
login(request, user)
messages.info(
request, f'You have registered successfully With username: {formData.cleaned_data.get("username")}')
return HttpResponseRedirect(reverse('login'))
# new_user.save() #signal will automatically create and attach customer instance
# # new_user.refresh_from_db() #now will be able to acess Customer profile
else:
return render(request, 'store/components/user/register.html', {'form': formData})
else:
return render(request, 'store/components/user/register.html', {'form': CustomUserCreationForm()})
# --------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------
# OLD SEARCH IMPLEMENTATIONS SAVED SNIPPETS
# only necessary for text search (when searched through the nav input)
# all_normal_prods = Product.objects.filter(Name__icontains = productquery)
# all_variants = Variant.objects.filter(Name__icontains = productquery)
# if all_normal_prods.exists() or all_variants.exists():
# print('Name Exists')
# normal_prod_detail = all_normal_prods.values('Name','id','Cost_to_customer','Crossed_price','Thumb_path','Highlights','Discount','Brand_id','Is_normal_product')
# variable_prod_details = all_variants.values('Name','id','Cost_to_customer','Crossed_price','Thumb_path','Highlights','Discount','Product_id','Is_normal_product')
# final_ordered_qs = variable_prod_details.union(normal_prod_detail,all=True).order_by('Cost_to_customer')
# # print(normal_prod_detail.union(variable_prod_details,all=True))
# # print(all_normal_prods.values('id','Brand_id','Is_normal_product').union(all_variants.values('id','Product_id','Is_normal_product')))
# try:
# page_no = request.GET['page']
# except:
# page_no = 1
# try:
# perpage = request.GET['perpage']
# except:
# perpage = 2
# paginator = Paginator(final_ordered_qs, perpage,orphans=1)
# render_context['product_data'] = paginator.get_page(page_no)
# # only necessary for text search (when searched through the nav input)
# # only to let me know if there was any search result at all
# render_context['msg'] = True
# # only necessary for text search (when searched through the nav input)
# render_context['result_for'] = 'Name'
# render_context['productquery'] = productquery
# return render(request,'store/components/view/search/searchView.html',render_context)
# else:
# # No Match in Name and Tags
# # search Description
# print('Searching Description')
# all_tagged_normal_products = Product.objects.filter(Description__icontains = productquery).values('Name','id','Cost_to_customer','Crossed_price','Thumb_path','Highlights','Discount','Brand_id','Is_normal_product')
# all_tagged_variable_products = ProductWithVariant.objects.filter(Description__icontains = productquery).prefetch_related('Variants')
# prod_vars = None
# for product in all_tagged_variable_products:
# p_vars = product.Variants.all().values('Name','id','Cost_to_customer','Crossed_price','Thumb_path','Highlights','Discount','Product_id','Is_normal_product')
# if prod_vars != None:
# prod_vars = p_vars.union(prod_vars)
# else:
# prod_vars = p_vars
# prod_vars.union(all_tagged_normal_products).order_by('Cost_to_customer')
# if all_tagged_normal_products.exists():
# print('Description Exists')
# render_context['product_data'] = all_tagged_normal_products
# render_context['variant_details'] = prod_vars
# render_context['msg'] = True
# render_context['result_for'] = 'Description'
# return render(request,'store/components/view/search/searchView.html',render_context)
# else:
# print('Final No result')
# render_context['msg'] = False
# render_context['result_for'] = None
# return render(request,'store/components/view/search/searchView.html',render_context)
| null |
store/comented_view.py
|
comented_view.py
|
py
| 30,668 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.template.loader.render_to_string",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.template.loader.render_to_string",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.template.loader.render_to_string",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.warning",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "django.db.models.F",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 317,
"usage_type": "name"
},
{
"api_name": "forms.ShippingAddressForm",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "forms.ShippingAddressForm",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "forms.LoginForm",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 462,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "forms.LoginForm",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 467,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "forms.LoginForm",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "forms.CustomUserCreationForm",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 489,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "forms.CustomUserCreationForm",
"line_number": 499,
"usage_type": "call"
}
] |
308327852
|
import scrapy
from scrapy.http import HtmlResponse
from leroyparser.items import LeroyparserItem
from scrapy.loader import ItemLoader
class LeroymerlinSpider(scrapy.Spider):
name = 'leroymerlin'
allowed_domains = ['leroymerlin.ru']
def __init__(self, text):
self.start_urls = [f'https://leroymerlin.ru/search/?q={text}']
def parse(self, response: HtmlResponse):
links_on_product = response.xpath("//div[@class='hover-image-buttons']/a/@href").extract()
for link in links_on_product:
if 'product' in link:
yield response.follow(link, callback=self.parse_product)
next_page = response.xpath("//div[@class='next-paginator-button-wrapper']/a/@href").extract_first()
yield response.follow(next_page, callback=self.parse)
def parse_product(self, response: HtmlResponse):
loader = ItemLoader(item=LeroyparserItem(), response=response)
loader.add_value('_id', str(response))
loader.add_xpath('name', "//h1/text()")
loader.add_xpath('photos', "//source[@media=' only screen and (min-width: 1024px)']/@srcset")
loader.add_xpath('terms', "//dt/text()")
loader.add_xpath('definitions', "//dd/text()")
loader.add_xpath('price', "//meta[@itemprop='price']/@content")
loader.add_value('link', str(response))
yield loader.load_item()
| null |
Lesson 7/leroyparser/spiders/leroymerlin.py
|
leroymerlin.py
|
py
| 1,386 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scrapy.Spider",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "scrapy.http.HtmlResponse",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "scrapy.http.HtmlResponse",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "scrapy.loader.ItemLoader",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "leroyparser.items.LeroyparserItem",
"line_number": 24,
"usage_type": "call"
}
] |
96833340
|
from django.db import models
import os
import graphene
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.utils.decorators import cached_classmethod
from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, StreamFieldPanel, TabbedInterface, HelpPanel
from wagtail.core.blocks import TextBlock, RichTextBlock, ListBlock, StreamBlock, StructBlock, URLBlock, PageChooserBlock, CharBlock
from wagtail.core.fields import StreamField, RichTextField
from wagtail.core.models import Page, Orderable
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.images.models import Image, AbstractImage, AbstractRendition
from wagtail.images.blocks import ImageChooserBlock
from wagtail.snippets.models import register_snippet
from wagtail.search import index
from wagtail.admin.edit_handlers import PageChooserPanel
from . import blocks as custom_blocks
from . import forms as custom_forms
WYSIWYG_GENERAL = ['h1', 'h2', 'h3', 'h4', 'bold', 'link', 'ul', 'ol', 'code']
WYSIWYG_SERVICE_STEP = ['ul', 'ol', 'link']
DEFAULT_MAX_LENGTH = 255
SHORT_DESCRIPTION_LENGTH = 300
class TranslatedImage(AbstractImage):
admin_form_fields = Image.admin_form_fields
def __str__(self):
return self.title or self.title_en
class TranslatedImageRendition(AbstractRendition):
image = models.ForeignKey(TranslatedImage, related_name='renditions', on_delete=models.CASCADE)
class Meta:
unique_together = (
('image', 'filter_spec', 'focal_point_key'),
)
@register_snippet
class ThreeOneOne(ClusterableModel):
title = models.CharField(max_length=DEFAULT_MAX_LENGTH)
url = models.URLField()
def __str__(self):
return self.title
class HomePage(Page):
parent_page_types = []
# subpage_types = ['base.ServicePage', 'base.ProcessPage', 'base.InformationPage', 'base.DepartmentPage']
subpage_types = ['base.ServicePage', 'base.ProcessPage', 'base.InformationPage', 'base.DepartmentPage', 'base.TopicPage']
image = models.ForeignKey(TranslatedImage, null=True, on_delete=models.SET_NULL, related_name='+')
class JanisBasePage(Page):
parent_page_types = ['base.HomePage']
subpage_types = []
search_fields = Page.search_fields + [
index.RelatedFields('owner', [
index.SearchField('last_name', partial_match=True),
index.FilterField('last_name'),
])
]
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
author_notes = RichTextField(
# max_length=DEFAULT_MAX_LENGTH,
features=['ul', 'ol', 'link'],
blank=True,
verbose_name='Notes for authors (Not visible on the resident facing site)'
)
def janis_url(self):
url_page_type = self.janis_url_page_type
page_slug = self.slug
# TODO: Add other languages
return os.environ["JANIS_URL"] + "/en/" + url_page_type + "/" + page_slug
def janis_preview_url(self):
revision = self.get_latest_revision()
url_page_type = self.janis_url_page_type
global_id = graphene.Node.to_global_id('PageRevisionNode', revision.id)
return os.environ["JANIS_URL"] + "/en/preview/" + url_page_type + "/" + global_id
# Default preview_url before janis_preview_url gets set
def fallback_preview_url(self):
return "https://alpha.austin.gov"
# data needed to construct preview URLs for any language
# [janis_url_base]/[lang]/preview/[url_page_type]/[global_id]
# ex: http://localhost:3000/es/preview/information/UGFnZVJldmlzaW9uTm9kZToyMjg=
def preview_url_data(self):
revision = self.get_latest_revision()
global_id = graphene.Node.to_global_id('PageRevisionNode', revision.id)
return {
"janis_url_base": os.environ["JANIS_URL"],
"url_page_type": self.janis_url_page_type,
"global_id": global_id
}
class Meta:
abstract = True
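# Sketch (assumption, following the preview URL comment in JanisBasePage): a consumer
# can rebuild a language-specific preview URL from the returned dict, e.g.
#   data = page.preview_url_data()
#   url = f"{data['janis_url_base']}/es/preview/{data['url_page_type']}/{data['global_id']}"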
class JanisPage(JanisBasePage):
@cached_classmethod
def get_edit_handler(cls):
if hasattr(cls, 'edit_handler'):
return cls.edit_handler.bind_to_model(cls)
edit_handler = TabbedInterface([
ObjectList(cls.content_panels + [
FieldPanel('author_notes')
], heading='Content'),
ObjectList(Page.promote_panels + cls.promote_panels, heading='Search Info')
])
return edit_handler.bind_to_model(cls)
class Meta:
abstract = True
class ServicePage(JanisPage):
janis_url_page_type = "services"
department = models.ForeignKey(
'base.DepartmentPage',
on_delete=models.PROTECT,
verbose_name='Select a Department',
blank=True,
null=True,
)
steps = StreamField(
[
('basic_step', RichTextBlock(
features=WYSIWYG_SERVICE_STEP,
label='Basic Step'
)),
('step_with_options_accordian', StructBlock(
[
                    ('options_description', TextBlock(label='Describe the set of options')),
('options', ListBlock(
StructBlock([
('option_name', TextBlock(
                                label='Option name. (When clicked, this name will expand the content for this option)'
)),
('option_description', RichTextBlock(
features=WYSIWYG_SERVICE_STEP,
label='Option Content',
)),
]),
)),
],
label="Step With Options"
)),
],
verbose_name='Write out the steps a resident needs to take to use the service',
# this gets called in the help panel
        help_text='A step may be a basic text step or an options accordion that reveals two or more options',
blank=True
)
dynamic_content = StreamField(
[
('map_block', custom_blocks.SnippetChooserBlockWithAPIGoodness('base.Map', icon='site')),
('what_do_i_do_with_block', custom_blocks.WhatDoIDoWithBlock()),
('collection_schedule_block', custom_blocks.CollectionScheduleBlock()),
('recollect_block', custom_blocks.RecollectBlock()),
],
verbose_name='Add any maps or apps that will help the resident use the service',
blank=True
)
additional_content = RichTextField(
features=WYSIWYG_GENERAL,
verbose_name='Write any additional content describing the service',
help_text='Section header: What else do I need to know?',
blank=True
)
base_form_class = custom_forms.ServicePageForm
short_description = models.TextField(
max_length=SHORT_DESCRIPTION_LENGTH,
blank=True,
verbose_name='Write a description of this service'
)
content_panels = [
FieldPanel('title_en'),
FieldPanel('title_es'),
FieldPanel('title_ar'),
FieldPanel('title_vi'),
FieldPanel('short_description'),
InlinePanel('topics', label='Topics'),
FieldPanel('department'),
MultiFieldPanel(
[
HelpPanel(steps.help_text, classname="coa-helpPanel"),
StreamFieldPanel('steps')
],
heading=steps.verbose_name,
classname='coa-multiField-nopadding'
),
StreamFieldPanel('dynamic_content'),
MultiFieldPanel(
[
HelpPanel(additional_content.help_text, classname="coa-helpPanel"),
FieldPanel('additional_content')
],
heading=additional_content.verbose_name,
classname='coa-multiField-nopadding'
        ),
InlinePanel('contacts', label='Contacts'),
]
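# Sketch (assumption) of how a stored "steps" StreamField value for the blocks above
# would look, using Wagtail's standard type/value JSON representation:
#   [{"type": "basic_step", "value": "<p>Fill out the form</p>"},
#    {"type": "step_with_options_accordian", "value": {
#        "options_description": "...", "options": [{"option_name": "...", "option_description": "..."}]}}]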
class ProcessPage(JanisPage):
janis_url_page_type = "processes"
department = models.ForeignKey(
'base.DepartmentPage',
on_delete=models.PROTECT,
verbose_name='Select a Department',
blank=True,
null=True,
)
description = models.TextField(blank=True)
image = models.ForeignKey(TranslatedImage, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
# TODO: Add images array field
base_form_class = custom_forms.ProcessPageForm
content_panels = [
InlinePanel('topics', label='Topics'),
FieldPanel('department'),
FieldPanel('description'),
ImageChooserPanel('image'),
InlinePanel('contacts', label='Contacts'),
InlinePanel('process_steps', label="Process steps"),
]
class InformationPage(JanisPage):
janis_url_page_type = "information"
department = models.ForeignKey(
'base.DepartmentPage',
on_delete=models.PROTECT,
verbose_name='Select a Department',
blank=True,
null=True,
)
description = models.TextField(blank=True, verbose_name='Write a description of this page')
options = StreamField(
[
('option', RichTextBlock(
features=WYSIWYG_GENERAL,
label='Option'
))
],
verbose_name='Add option sections as needed.',
help_text='Options are needed when the reader needs to make a choice between a few options, such as ways to fill out a form (online, by phone, in person, etc.).',
blank=True
)
additional_content = RichTextField(
features=WYSIWYG_GENERAL,
verbose_name='Write any additional content describing the service',
blank=True
)
# TODO: Add images array field
base_form_class = custom_forms.InformationPageForm
content_panels = [
FieldPanel('title_en'),
FieldPanel('title_es'),
FieldPanel('title_ar'),
FieldPanel('title_vi'),
InlinePanel('topics', label='Topics'),
FieldPanel('department'),
FieldPanel('description'),
StreamFieldPanel('options'),
FieldPanel('additional_content'),
InlinePanel('contacts', label='Contacts'),
]
class TopicCollectionPage(JanisPage):
janis_url_page_type = "topiccollection"
description = models.TextField(blank=True)
theme = models.ForeignKey(
'base.Theme',
on_delete=models.PROTECT,
related_name='topicCollectionPages',
null=True, blank=True,
)
image = models.ForeignKey(TranslatedImage, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
base_form_class = custom_forms.TopicCollectionPageForm
content_panels = [
FieldPanel('title_en'),
FieldPanel('title_es'),
FieldPanel('title_ar'),
FieldPanel('title_vi'),
FieldPanel('description'),
FieldPanel('theme'),
ImageChooserPanel('image'),
InlinePanel('topiccollections', label='Topic Collections this page belongs to'),
]
class TopicPage(JanisPage):
janis_url_page_type = "topic"
description = models.TextField(blank=True)
image = models.ForeignKey(TranslatedImage, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
external_services = StreamField(
[
('link_en', StructBlock([
('url', URLBlock()),
('title', CharBlock()),
], icon='link', label='Link [EN]')),
('link_es', StructBlock([
('url', URLBlock()),
('title', CharBlock()),
], icon='link', label='Link [ES]')),
('link_ar', StructBlock([
('url', URLBlock()),
('title', CharBlock()),
], icon='link', label='Link [AR]')),
('link_vi', StructBlock([
('url', URLBlock()),
('title', CharBlock()),
], icon='link', label='Link [VI]')),
],
verbose_name='External links to services',
blank=True
)
base_form_class = custom_forms.TopicPageForm
content_panels = [
FieldPanel('title_en'),
FieldPanel('title_es'),
FieldPanel('title_ar'),
FieldPanel('title_vi'),
FieldPanel('description'),
ImageChooserPanel('image'),
StreamFieldPanel('external_services'),
InlinePanel('topiccollections', label='Topic Collections this page belongs to'),
]
class DepartmentPage(JanisPage):
janis_url_page_type = "department"
def __str__(self):
return self.title_en
what_we_do = RichTextField(
features=WYSIWYG_GENERAL,
verbose_name='What we do',
blank=True
)
image = models.ForeignKey(TranslatedImage, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
mission = models.TextField(
verbose_name='Mission',
)
job_listings = models.URLField(
verbose_name='Job listings url',
help_text='Link to a page with job listings.',
blank=True
)
top_services = StreamField(
[
('link_en', StructBlock([
('url', URLBlock()),
('title', CharBlock()),
], icon='link', label='Link [EN]')),
('link_es', StructBlock([
('url', URLBlock()),
('title', CharBlock()),
], icon='link', label='Link [ES]')),
('link_ar', StructBlock([
('url', URLBlock()),
('title', CharBlock()),
], icon='link', label='Link [AR]')),
('link_vi', StructBlock([
('url', URLBlock()),
('title', CharBlock()),
], icon='link', label='Link [VI]')),
# ('page', PageChooserBlock(
# label='Choose a page',
# icon='doc-full'
# ))
],
verbose_name='Links to top services',
blank=True
)
base_form_class = custom_forms.DepartmentPageForm
content_panels = [
FieldPanel('title_en'),
FieldPanel('title_es'),
FieldPanel('title_ar'),
FieldPanel('title_vi'),
FieldPanel('what_we_do'),
ImageChooserPanel('image'),
FieldPanel('mission'),
InlinePanel('contacts', label='Contacts'),
InlinePanel('department_directors', label="Department Directors"),
FieldPanel('job_listings'),
StreamFieldPanel('top_services'),
]
class DepartmentPageDirector(Orderable):
page = ParentalKey(DepartmentPage, related_name='department_directors')
name = models.CharField(max_length=DEFAULT_MAX_LENGTH)
title = models.CharField(max_length=DEFAULT_MAX_LENGTH, default='Director')
photo = models.ForeignKey(TranslatedImage, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
about = models.TextField(blank=True)
panels = [
FieldPanel('name'),
FieldPanel('title'),
ImageChooserPanel('photo'),
FieldPanel('about'),
]
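# Orderable steps rendered inside a ProcessPage.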
class ProcessPageStep(Orderable):
page = ParentalKey(ProcessPage, related_name='process_steps')
title = models.CharField(max_length=DEFAULT_MAX_LENGTH)
short_title = models.CharField(max_length=DEFAULT_MAX_LENGTH)
link_title = models.CharField(max_length=DEFAULT_MAX_LENGTH)
description = models.TextField(blank=True)
image = models.ForeignKey(TranslatedImage, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
overview_steps = RichTextField(features=WYSIWYG_GENERAL, verbose_name='Write out the steps a resident needs to take to use the service', blank=True)
detailed_content = RichTextField(features=WYSIWYG_GENERAL, verbose_name='Write any detailed content describing the process', blank=True)
quote = models.TextField(blank=True)
panels = [
FieldPanel('title'),
FieldPanel('short_title'),
FieldPanel('link_title'),
FieldPanel('description'),
ImageChooserPanel('image'),
FieldPanel('overview_steps'),
FieldPanel('detailed_content'),
FieldPanel('quote'),
]
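# Reusable snippets shared by the page types above: themes, maps, locations, and contact details.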
@register_snippet
class Theme(ClusterableModel):
slug = models.SlugField()
text = models.CharField(max_length=DEFAULT_MAX_LENGTH)
description = models.TextField()
def __str__(self):
return self.text
@register_snippet
class Map(ClusterableModel):
description = models.TextField()
location = models.ForeignKey('base.Location', on_delete=models.CASCADE, related_name='+')
def __str__(self):
return self.description
def serializable_data(self):
data = {
'location': self.location.serializable_data(),
'description': self.description,
}
data['location'].pop('pk')
return data
@register_snippet
class Location(ClusterableModel):
TX = 'TX'
STATE_CHOICES = (
(TX, 'Texas'),
)
USA = 'United States'
COUNTRY_CHOICES = (
(USA, 'United States'),
)
name = models.CharField(max_length=DEFAULT_MAX_LENGTH)
street = models.TextField()
city = models.CharField(max_length=DEFAULT_MAX_LENGTH, default='Austin')
state = models.CharField(max_length=2, choices=STATE_CHOICES, default=TX)
country = models.CharField(max_length=100, choices=COUNTRY_CHOICES, default=USA)
zip = models.CharField(max_length=50)
panels = [
FieldPanel('name'),
MultiFieldPanel(children=[
FieldPanel('street'),
FieldPanel('city', classname='col5'),
FieldPanel('state', classname='col4'),
FieldPanel('zip', classname='col2'),
FieldPanel('country', classname='col5'),
], heading='Location'),
]
def __str__(self):
return self.name
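# Recurring weekly hours; attached to contacts through ContactDayAndDuration below.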
class DayAndDuration(ClusterableModel):
MONDAY = 'Monday'
TUESDAY = 'Tuesday'
WEDNESDAY = 'Wednesday'
THURSDAY = 'Thursday'
FRIDAY = 'Friday'
SATURDAY = 'Saturday'
SUNDAY = 'Sunday'
DAY_OF_WEEK_CHOICES = (
(MONDAY, 'Monday'),
(TUESDAY, 'Tuesday'),
(WEDNESDAY, 'Wednesday'),
(THURSDAY, 'Thursday'),
(FRIDAY, 'Friday'),
(SATURDAY, 'Saturday'),
(SUNDAY, 'Sunday'),
)
day_of_week = models.CharField(max_length=20, choices=DAY_OF_WEEK_CHOICES)
start_time = models.TimeField()
end_time = models.TimeField()
content_panels = [
FieldPanel('day_of_week'),
FieldPanel('start_time'),
FieldPanel('end_time'),
]
def __str__(self):
return f'{self.day_of_week} {self.start_time} - {self.end_time}'
@register_snippet
class Contact(ClusterableModel):
name = models.CharField(max_length=DEFAULT_MAX_LENGTH)
email = models.EmailField()
phone = models.CharField(max_length=DEFAULT_MAX_LENGTH)
location = models.ForeignKey(Location, null=True, blank=True, related_name='+', on_delete=models.SET_NULL)
social_media = StreamField(
[
('url', URLBlock(
label='Social media url'
))
],
verbose_name='Links to any social media pages',
help_text='For example: https://www.facebook.com/atxpoliceoversight/',
blank=True
)
panels = [
FieldPanel('name'),
FieldPanel('email'),
FieldPanel('phone'),
SnippetChooserPanel('location'),
InlinePanel('hours', label='Hours'),
StreamFieldPanel('social_media'),
]
def __str__(self):
return self.name
class ContactDayAndDuration(Orderable, DayAndDuration):
contact = ParentalKey(Contact, related_name='hours')
content_panels = [
SnippetChooserPanel('day_and_duration'),
]
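# Through-models tying pages to shared contacts, topics, and topic collections.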
class ProcessPageContact(ClusterableModel):
process = ParentalKey(ProcessPage, related_name='contacts')
contact = models.ForeignKey(Contact, related_name='+', on_delete=models.CASCADE)
panels = [
SnippetChooserPanel('contact'),
]
def __str__(self):
return self.contact.name
class ProcessPageTopic(ClusterableModel):
page = ParentalKey(ProcessPage, related_name='topics')
topic = models.ForeignKey('base.TopicPage', verbose_name='Select a Topic', related_name='+', on_delete=models.CASCADE)
panels = [
PageChooserPanel('topic'),
]
def __str__(self):
        return self.topic.title
class ServicePageContact(ClusterableModel):
page = ParentalKey(ServicePage, related_name='contacts')
contact = models.ForeignKey(Contact, related_name='+', on_delete=models.CASCADE)
panels = [
SnippetChooserPanel('contact'),
]
def __str__(self):
return self.contact.name
class TopicCollectionPageTopicCollection(ClusterableModel):
page = ParentalKey(TopicCollectionPage, related_name='topiccollections')
topiccollection = models.ForeignKey('base.TopicCollectionPage', verbose_name='Select a Topic Collection', related_name='+', on_delete=models.CASCADE)
panels = [
PageChooserPanel('topiccollection'),
]
def __str__(self):
        return self.topiccollection.title
class TopicPageTopicCollection(ClusterableModel):
page = ParentalKey(TopicPage, related_name='topiccollections')
topiccollection = models.ForeignKey('base.TopicCollectionPage', verbose_name='Select a Topic Collection', related_name='+', on_delete=models.CASCADE)
panels = [
PageChooserPanel('topiccollection'),
]
def __str__(self):
        return self.topiccollection.title
class ServicePageTopic(ClusterableModel):
page = ParentalKey(ServicePage, related_name='topics')
topic = models.ForeignKey('base.TopicPage', verbose_name='Select a Topic', related_name='+', on_delete=models.CASCADE)
toplink = models.BooleanField(default=False, verbose_name='Make this service a top link for this topic')
panels = [
MultiFieldPanel(
[
PageChooserPanel('topic'),
FieldPanel('toplink'),
]
),
]
def __str__(self):
        return self.topic.title
class InformationPageContact(ClusterableModel):
page = ParentalKey(InformationPage, related_name='contacts')
contact = models.ForeignKey(Contact, related_name='+', on_delete=models.CASCADE)
panels = [
SnippetChooserPanel('contact'),
]
def __str__(self):
return self.contact.name
class InformationPageTopic(ClusterableModel):
page = ParentalKey(InformationPage, related_name='topics')
topic = models.ForeignKey('base.TopicPage', verbose_name='Select a Topic', related_name='+', on_delete=models.CASCADE)
toplink = models.BooleanField(default=False, verbose_name='Make this page a top link for this topic')
panels = [
MultiFieldPanel(
[
PageChooserPanel('topic'),
FieldPanel('toplink'),
]
),
]
def __str__(self):
        return self.topic.title
class DepartmentPageContact(ClusterableModel):
page = ParentalKey(DepartmentPage, related_name='contacts')
contact = models.ForeignKey(Contact, related_name='+', on_delete=models.CASCADE)
panels = [
SnippetChooserPanel('contact'),
]
def __str__(self):
return self.contact.name
@register_snippet
class Department(ClusterableModel):
slug = models.SlugField()
name = models.CharField(max_length=DEFAULT_MAX_LENGTH)
mission = models.TextField()
image = models.ForeignKey(TranslatedImage, null=True, on_delete=models.SET_NULL, related_name='+')
panels = [
FieldPanel('name'),
FieldPanel('mission'),
InlinePanel('contacts', label='Contacts'),
ImageChooserPanel('image'),
]
def __str__(self):
return self.name
class DepartmentContact(ClusterableModel):
department = ParentalKey(Department, related_name='contacts')
contact = models.ForeignKey(Contact, related_name='+', on_delete=models.CASCADE)
panels = [
SnippetChooserPanel('contact'),
]
def __str__(self):
return self.department.name
| null |
joplin/base/models.py
|
models.py
|
py
| 24,194 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "wagtail.images.models.AbstractImage",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "wagtail.images.models.Image.admin_form_fields",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "wagtail.images.models.Image",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "wagtail.images.models.AbstractRendition",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.db.models.URLField",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "wagtail.snippets.models.register_snippet",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "wagtail.core.models.Page",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "wagtail.core.models.Page",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "wagtail.core.models.Page.search_fields",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "wagtail.core.models.Page",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "wagtail.search.index.RelatedFields",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "wagtail.search.index",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "wagtail.search.index.SearchField",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "wagtail.search.index",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "wagtail.search.index.FilterField",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "wagtail.search.index",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "wagtail.core.fields.RichTextField",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "graphene.Node.to_global_id",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "graphene.Node",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "graphene.Node.to_global_id",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "graphene.Node",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "wagtail.admin.edit_handlers.TabbedInterface",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.ObjectList",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.ObjectList",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "wagtail.core.models.Page.promote_panels",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "wagtail.core.models.Page",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "wagtail.utils.decorators.cached_classmethod",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "django.db.models.PROTECT",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "wagtail.core.fields.StreamField",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.RichTextBlock",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.StructBlock",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.TextBlock",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.ListBlock",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.StructBlock",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.TextBlock",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.RichTextBlock",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "wagtail.core.fields.StreamField",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "wagtail.core.fields.RichTextField",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "django.db.models.TextField",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.MultiFieldPanel",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.HelpPanel",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.StreamFieldPanel",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.StreamFieldPanel",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.MultiFieldPanel",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.HelpPanel",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "django.db.models.PROTECT",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "wagtail.images.edit_handlers.ImageChooserPanel",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "django.db.models.PROTECT",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "wagtail.core.fields.StreamField",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.RichTextBlock",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "wagtail.core.fields.RichTextField",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.StreamFieldPanel",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "django.db.models.TextField",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "django.db.models.PROTECT",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "wagtail.images.edit_handlers.ImageChooserPanel",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "django.db.models.TextField",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "wagtail.core.fields.StreamField",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.StructBlock",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.URLBlock",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.CharBlock",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.StructBlock",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.URLBlock",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.CharBlock",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.StructBlock",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.URLBlock",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.CharBlock",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.StructBlock",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.URLBlock",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.CharBlock",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "wagtail.images.edit_handlers.ImageChooserPanel",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.StreamFieldPanel",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "wagtail.core.fields.RichTextField",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 386,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 386,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.TextField",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 387,
"usage_type": "name"
},
{
"api_name": "django.db.models.URLField",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 391,
"usage_type": "name"
},
{
"api_name": "wagtail.core.fields.StreamField",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.StructBlock",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.URLBlock",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.CharBlock",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.StructBlock",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.URLBlock",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.CharBlock",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.StructBlock",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.URLBlock",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.CharBlock",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.StructBlock",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.URLBlock",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.CharBlock",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "wagtail.images.edit_handlers.ImageChooserPanel",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.StreamFieldPanel",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "wagtail.core.models.Orderable",
"line_number": 440,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 442,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.TextField",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "wagtail.images.edit_handlers.ImageChooserPanel",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "wagtail.core.models.Orderable",
"line_number": 454,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 456,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 457,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 458,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 460,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "wagtail.core.fields.RichTextField",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "wagtail.core.fields.RichTextField",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "django.db.models.TextField",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "wagtail.images.edit_handlers.ImageChooserPanel",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 478,
"usage_type": "name"
},
{
"api_name": "django.db.models.SlugField",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 479,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 480,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 481,
"usage_type": "name"
},
{
"api_name": "wagtail.snippets.models.register_snippet",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 488,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 489,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 490,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 490,
"usage_type": "attribute"
},
{
"api_name": "wagtail.snippets.models.register_snippet",
"line_number": 487,
"usage_type": "name"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 507,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 518,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 519,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 520,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 521,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 522,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 523,
"usage_type": "name"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.MultiFieldPanel",
"line_number": 527,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 532,
"usage_type": "call"
},
{
"api_name": "wagtail.snippets.models.register_snippet",
"line_number": 506,
"usage_type": "name"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 540,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 558,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 558,
"usage_type": "name"
},
{
"api_name": "django.db.models.TimeField",
"line_number": 559,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 559,
"usage_type": "name"
},
{
"api_name": "django.db.models.TimeField",
"line_number": 560,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 560,
"usage_type": "name"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 564,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 565,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 573,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 574,
"usage_type": "name"
},
{
"api_name": "django.db.models.EmailField",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 575,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 576,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 576,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 577,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 577,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 577,
"usage_type": "attribute"
},
{
"api_name": "wagtail.core.fields.StreamField",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "wagtail.core.blocks.URLBlock",
"line_number": 581,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 593,
"usage_type": "call"
},
{
"api_name": "wagtail.snippets.edit_handlers.SnippetChooserPanel",
"line_number": 594,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 595,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.StreamFieldPanel",
"line_number": 596,
"usage_type": "call"
},
{
"api_name": "wagtail.snippets.models.register_snippet",
"line_number": 572,
"usage_type": "name"
},
{
"api_name": "wagtail.core.models.Orderable",
"line_number": 603,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 604,
"usage_type": "call"
},
{
"api_name": "wagtail.snippets.edit_handlers.SnippetChooserPanel",
"line_number": 607,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 611,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 612,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 613,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 613,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 613,
"usage_type": "attribute"
},
{
"api_name": "wagtail.snippets.edit_handlers.SnippetChooserPanel",
"line_number": 616,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 622,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 624,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 624,
"usage_type": "attribute"
},
{
"api_name": "wagtail.admin.edit_handlers.PageChooserPanel",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 633,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 634,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 635,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 635,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 635,
"usage_type": "attribute"
},
{
"api_name": "wagtail.snippets.edit_handlers.SnippetChooserPanel",
"line_number": 638,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 644,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 645,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 646,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 646,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 646,
"usage_type": "attribute"
},
{
"api_name": "wagtail.admin.edit_handlers.PageChooserPanel",
"line_number": 649,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 655,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 656,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 657,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 657,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 657,
"usage_type": "attribute"
},
{
"api_name": "wagtail.admin.edit_handlers.PageChooserPanel",
"line_number": 660,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 666,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 667,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 668,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 668,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 668,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 669,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 669,
"usage_type": "name"
},
{
"api_name": "wagtail.admin.edit_handlers.MultiFieldPanel",
"line_number": 672,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.PageChooserPanel",
"line_number": 674,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 675,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 683,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 685,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 685,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 685,
"usage_type": "attribute"
},
{
"api_name": "wagtail.snippets.edit_handlers.SnippetChooserPanel",
"line_number": 688,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 694,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 695,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 696,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 696,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 696,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 697,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 697,
"usage_type": "name"
},
{
"api_name": "wagtail.admin.edit_handlers.MultiFieldPanel",
"line_number": 700,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.PageChooserPanel",
"line_number": 702,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 703,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 711,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 712,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 713,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 713,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 713,
"usage_type": "attribute"
},
{
"api_name": "wagtail.snippets.edit_handlers.SnippetChooserPanel",
"line_number": 716,
"usage_type": "call"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 723,
"usage_type": "name"
},
{
"api_name": "django.db.models.SlugField",
"line_number": 724,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 724,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 725,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 725,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 726,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 726,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 727,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 727,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 727,
"usage_type": "attribute"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 730,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 731,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.InlinePanel",
"line_number": 732,
"usage_type": "call"
},
{
"api_name": "wagtail.images.edit_handlers.ImageChooserPanel",
"line_number": 733,
"usage_type": "call"
},
{
"api_name": "wagtail.snippets.models.register_snippet",
"line_number": 722,
"usage_type": "name"
},
{
"api_name": "modelcluster.models.ClusterableModel",
"line_number": 740,
"usage_type": "name"
},
{
"api_name": "modelcluster.fields.ParentalKey",
"line_number": 741,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 742,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 742,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 742,
"usage_type": "attribute"
},
{
"api_name": "wagtail.snippets.edit_handlers.SnippetChooserPanel",
"line_number": 745,
"usage_type": "call"
}
] |
163490273
|
from src.platform.tomcat.authenticate import checkAuth
from src.platform.tomcat.interfaces import TINTERFACES
from requests import exceptions
from cprint import FingerPrint
from re import findall
from log import LOG
import utility
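# Fingerprint for Apache Tomcat 4.0, matched via the version banner served on /index.jsp.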
class FPrint(FingerPrint):
def __init__(self):
self.platform = "tomcat"
self.version = "4.0"
self.title = TINTERFACES.APP
self.uri = "/index.jsp"
self.port = 8080
self.hash = None
def check(self, ip, port=None):
"""
"""
try:
rport = self.port if port is None else port
url = "http://{0}:{1}{2}".format(ip, rport, self.uri)
response = utility.requests_get(url)
            found = findall(r"Apache Tomcat/(.*?)\n", response.content)
if len(found) > 0 and self.version in found[0]:
return True
except exceptions.Timeout:
utility.Msg("{0} timeout to {1}:{2}".format(self.platform,
ip, rport),
LOG.DEBUG)
except exceptions.ConnectionError:
utility.Msg("{0} connection error to {1}:{2}".format(self.platform,
ip, rport),
LOG.DEBUG)
return False
| null |
src/platform/tomcat/fingerprints/Tomcat4.py
|
Tomcat4.py
|
py
| 1,402 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cprint.FingerPrint",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "src.platform.tomcat.interfaces.TINTERFACES.APP",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "src.platform.tomcat.interfaces.TINTERFACES",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "utility.requests_get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "requests.exceptions.Timeout",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "utility.Msg",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "log.LOG.DEBUG",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "log.LOG",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "requests.exceptions.ConnectionError",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "utility.Msg",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "log.LOG.DEBUG",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "log.LOG",
"line_number": 39,
"usage_type": "name"
}
] |
333349377
|
from pathlib import Path
import os
import os.path as osp
import logging
# set up directories
HOME = str(Path.home())
CACHE = osp.join(HOME, '.dbinspector')
LOGS = osp.join(CACHE, 'logs')
# parsed
UNIPROT = osp.join(CACHE, 'uniprot')
REFSEQ = osp.join(CACHE, 'refseq')
# downloads
DATA = osp.join(CACHE, 'data')
REFSEQ_FASTA = osp.join(DATA, 'refseq_fasta')
for folder in [CACHE, LOGS, UNIPROT, REFSEQ, DATA, REFSEQ_FASTA]:
os.makedirs(folder, exist_ok=True)
# logging
logging.basicConfig(filename=osp.join(LOGS, 'dbinspection.log'),
filemode='a',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%d/%m/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
| null |
dbi_pkg/dbinspector/startup.py
|
startup.py
|
py
| 777 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pathlib.Path.home",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 26,
"usage_type": "attribute"
}
] |
471852386
|
import os
import socket
import unittest
import subprocess
import dbma.checkings as checkings
from datetime import datetime
class CheckingsTestCase(unittest.TestCase):
"""
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
# create a socket server
cls.ip = '127.0.0.1'
cls.port = 65321
cls.user = 'unittest'
cls.now = datetime.now().isoformat()
cls.t_file = f"/tmp/unittest-{cls.now}"
try:
cls.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
cls.sock.bind((cls.ip,cls.port))
cls.sock.listen(2)
        except Exception:
pass
# create a user
subprocess.run(f"groupadd {cls.user}",shell=True)
subprocess.run(f"useradd -g {cls.user} {cls.user}",shell=True)
with open(cls.t_file,'w') as fd:
fd.write('unittest.')
@classmethod
def tearDownClass(cls):
super().tearDownClass()
if cls.sock and hasattr(cls.sock,'close'):
cls.sock.close()
subprocess.run(f"userdel {cls.user}",shell=True)
os.remove(cls.t_file)
def test_01_is_port_in_use(self):
self.assertTrue(checkings.is_port_in_use(self.ip,self.port))
def test_02_is_user_exists(self):
self.assertTrue(checkings.is_user_exists('unittest'))
def test_03_is_file_exists(self):
self.assertTrue(checkings.is_file_exists(self.t_file))
def test_04_is_group_exists(self):
self.assertTrue(checkings.is_group_exists('unittest'))
def test_05_is_directory_exists(self):
self.assertTrue(checkings.is_directory_exists('/tmp/'))
def test_06_is_an_supported_mysql_version(self):
self.assertTrue(checkings.is_an_supported_mysql_version('mysql-8.0.17-linux-glibc2.12-x86_64.tar.xz'))
self.assertFalse(checkings.is_an_supported_mysql_version('mysql-8.0.16-linux-glibc2.12-x86_64.tar.xz'))
self.assertFalse(checkings.is_an_supported_mysql_version('mysql-5.7.26-linux-glibc2.12-x86_64.tar.gz'))
def test_07_is_local_ip(self):
"""
"""
self.assertTrue(checkings.is_local_ip('127.0.0.1'))
self.assertFalse(checkings.is_local_ip('127.0.0.1111'))
def test_08_is_template_file_exists(self):
"""
检查安装包中是否包涵配置文件模板
"""
project_dir = os.path.dirname(os.path.dirname(__file__))
        # the config-file template for mysql-8.0.17 must be present
cnf_tmpl_file_17 = os.path.join(project_dir,'dbma/static/cnfs/','mysql-8.0.17.cnf.jinja')
self.assertTrue(os.path.isfile(cnf_tmpl_file_17))
        # the config-file template for mysql-8.0.18 must be present
        cnf_tmpl_file_18 = os.path.join(project_dir,'dbma/static/cnfs/','mysql-8.0.18.cnf.jinja')
self.assertTrue(os.path.isfile(cnf_tmpl_file_18))
        # the init-only config-file template must be present
cnf_tmpl_init_only = os.path.join(project_dir,'dbma/static/cnfs/','mysql-8.0-init-only.jinja')
self.assertTrue(os.path.isfile(cnf_tmpl_init_only))
| null |
tests/test_checkings.py
|
test_checkings.py
|
py
| 3,126 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "socket.socket",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "dbma.checkings.is_port_in_use",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "dbma.checkings",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "dbma.checkings.is_user_exists",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "dbma.checkings",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "dbma.checkings.is_file_exists",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "dbma.checkings",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "dbma.checkings.is_group_exists",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "dbma.checkings",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "dbma.checkings.is_directory_exists",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "dbma.checkings",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "dbma.checkings.is_an_supported_mysql_version",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "dbma.checkings",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "dbma.checkings.is_an_supported_mysql_version",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "dbma.checkings",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "dbma.checkings.is_an_supported_mysql_version",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "dbma.checkings",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "dbma.checkings.is_local_ip",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "dbma.checkings",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "dbma.checkings.is_local_ip",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "dbma.checkings",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 84,
"usage_type": "attribute"
}
] |
387581399
|
# -*- coding: utf-8 -*-
import pytz
from pytest import raises, approx
import astral.geocoder
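# count how many entries in *locations* carry exactly the given name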
def location_count(name, locations):
return len(list(filter(lambda item: item.name == name, locations)))
class TestDatabase:
"""Test database access functions"""
def test_all_locations(self, test_database):
for loc in astral.geocoder.all_locations(test_database):
assert loc.name
location_list = astral.geocoder.all_locations(test_database)
all_locations = list(location_list)
assert location_count("London", all_locations) == 1
assert location_count("Abu Dhabi", all_locations) == 2
def test_lookup(self, test_database):
loc = astral.geocoder.lookup("London", test_database)
assert loc.name == "London"
assert loc.region == "England"
assert loc.latitude == approx(51.4733, abs=0.001)
assert loc.longitude == approx(-0.0008333, abs=0.000001)
tz = pytz.timezone("Europe/London")
tzl = pytz.timezone(loc.timezone)
assert tz == tzl
def test_city_in_db(self, test_database):
astral.geocoder.lookup("london", test_database)
def test_group_in_db(self, test_database):
astral.geocoder.lookup("africa", test_database)
def test_location_not_in_db(self, test_database):
with raises(KeyError):
astral.geocoder.lookup("Nowhere", test_database)
def test_group_not_in_db(self, test_database):
with raises(KeyError):
astral.geocoder.group("wallyland", test_database)
def test_lookup_city_and_region(self, test_database):
city_name = "Birmingham,England"
city = astral.geocoder.lookup(city_name, test_database)
assert city.name == "Birmingham"
assert city.region == "England"
def test_country_with_multiple_entries_no_country(self, test_database):
city = astral.geocoder.lookup("Abu Dhabi", test_database)
assert city.name == "Abu Dhabi"
def test_country_with_multiple_entries_with_country(self, test_database):
"""Test for fix made due to bug report from Klaus Alexander Seistrup"""
city = astral.geocoder.lookup("Abu Dhabi,United Arab Emirates", test_database)
assert city.name == "Abu Dhabi"
city = astral.geocoder.lookup("Abu Dhabi,UAE", test_database)
assert city.name == "Abu Dhabi"
class TestBugReports:
"""Test for bug report fixes"""
def test_Adelaide(self, test_database):
"""Test for fix made due to bug report from Klaus Alexander Seistrup"""
astral.geocoder.lookup("Adelaide", test_database)
def test_CandianCities(self, test_database):
astral.geocoder.lookup("Fredericton", test_database)
class TestDatabaseAddLocations:
"""Test adding locations to database"""
def test_newline_at_end(self, test_database):
count = astral.geocoder._location_count(test_database)
astral.geocoder.add_locations(
"A Place,A Region,Asia/Nicosia,35°10'N,33°25'E,162.0\n", test_database
)
assert astral.geocoder._location_count(test_database) == count + 1
def test_from_list_of_strings(self, test_database):
count = astral.geocoder._location_count(test_database)
astral.geocoder.add_locations(
[
"A Place,A Region,Asia/Nicosia,35°10'N,33°25'E,162.0",
"Another Place,Somewhere else,Asia/Nicosia,35°10'N,33°25'E,162.0",
],
test_database,
)
assert astral.geocoder._location_count(test_database) == count + 2
def test_from_list_of_lists(self, test_database):
count = astral.geocoder._location_count(test_database)
astral.geocoder.add_locations(
[
["A Place", "A Region", "Asia/Nicosia", "35°10'N", "33°25'E", "162.0"],
[
"Another Place",
"Somewhere else",
"Asia/Nicosia",
"35°10'N",
"33°25'E",
"162.0",
],
],
test_database,
)
assert astral.geocoder._location_count(test_database) == count + 2
def test_SanitizeKey():
assert astral.geocoder._sanitize_key("Los Angeles") == "los_angeles"
| null |
src/test/test_geocoder.py
|
test_geocoder.py
|
py
| 4,330 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "astral.geocoder.geocoder.all_locations",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.all_locations",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.lookup",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pytest.approx",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pytest.approx",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pytz.timezone",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pytz.timezone",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder.lookup",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.lookup",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "pytest.raises",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder.lookup",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "pytest.raises",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder.group",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.lookup",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.lookup",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.lookup",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.lookup",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.lookup",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.lookup",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder._location_count",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.add_locations",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder._location_count",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder._location_count",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.add_locations",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder._location_count",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder._location_count",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder.add_locations",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder._location_count",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "astral.geocoder.geocoder._sanitize_key",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "astral.geocoder.geocoder",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "astral.geocoder",
"line_number": 122,
"usage_type": "name"
}
] |
194736369
|
from pylatex import (Alignat, Center, Document, LargeText, MediumText,
                     Section)
from pylatex.utils import bold


class EquationsGenerator:
    """Render a mapping of equation names to expressions as a PDF via pylatex."""

    def __init__(self, equations):
        geometry_options = {
            "head": "2.5cm",
            "left": "3cm",
            "right": "3cm",
            "bottom": "2.5cm"
        }
        doc = Document(geometry_options=geometry_options, inputenc='utf8')
        self.doc = doc
        equations = self.Read(equations)
        self.Create(equations)
        self.doc.generate_pdf(filepath='Equations', clean_tex=False, compiler='pdflatex')

    def Read(self, equations):
        # Collapse each multi-line right-hand side to a single line and join
        # it to its key with the alignment marker "&=" expected by Alignat.
        eqs = []
        for key in equations:
            value = ' '.join([line.strip() for line in equations.get(key).strip().splitlines()])
            equation = key + " &= " + value
            eqs.append(equation)
        return eqs

    def Create(self, equations):
        with self.doc.create(Center()) as Centered:
            with Centered.create(Section(title='', numbering='')) as Title:
                Title.append(LargeText(bold('Generalized model')))
        with self.doc.create(Section(title='Equations', numbering='1.')) as Intro:
            Intro.append(MediumText('These are the equations for the model:'))
        for eq in equations:
            # escape=False lets the raw LaTeX (including "&=") pass through.
            with self.doc.create(Alignat(numbering=True, escape=False)) as math_eq:
                math_eq.append(eq)
| null |
SIRModel/EquationsGenerator.py
|
EquationsGenerator.py
|
py
| 1,510 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pylatex.Document",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pylatex.Center",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pylatex.Section",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pylatex.LargeText",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pylatex.utils.bold",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pylatex.Section",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pylatex.MediumText",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pylatex.Alignat",
"line_number": 39,
"usage_type": "call"
}
] |