prompt (large_string, lengths 72–9.34k) | completion (large_string, lengths 0–7.61k)
---|---
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def __init__(self, c):
self.c = c
self.is_star = False
def match(self, c):
<|fim_middle|>
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1]
else:
if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1]
return dp[-1][-1]
<|fim▁end|> | return self.c == '.' or self.c == c |
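The rows above and below all elide different spans of the same regular-expression-matching solution; assembled, the file runs on its own. A lightly commented sketch of the complete program, with two illustrative checks appended (assumed standard test cases for this problem, not taken from the dataset):

```python
class R:
    """One pattern unit: a literal character or '.', optionally starred."""
    def __init__(self, c):
        self.c = c
        self.is_star = False

    def match(self, c):
        return self.c == '.' or self.c == c


class Solution(object):
    def isMatch(self, s, p):
        # Compile the pattern into units, folding each '*' into the
        # preceding unit: 'c*a*b' -> [c*, a*, b].
        rs = []
        for c in p:
            if c == '*':
                rs[-1].is_star = True
            else:
                rs.append(R(c))
        lr, ls = len(rs), len(s)
        s += '\0'  # sentinel so s[j - 1] stays in range at j == 0
        # dp[i][j]: the first i pattern units match the first j characters.
        dp = [[False] * (ls + 1) for _ in range(lr + 1)]
        dp[0][0] = True
        for i, r in enumerate(rs):
            for j in range(ls + 1):
                c = s[j - 1]
                if r.is_star:
                    # A starred unit can match zero characters ...
                    dp[i + 1][j] = dp[i][j]
                    # ... or extend a match by one more character.
                    if j and r.match(c):
                        dp[i + 1][j] |= dp[i + 1][j - 1]
                else:
                    if j and r.match(c):
                        dp[i + 1][j] = dp[i][j - 1]
        return dp[-1][-1]


assert Solution().isMatch('aab', 'c*a*b')
assert not Solution().isMatch('mississippi', 'mis*is*p*.')
```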
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def __init__(self, c):
self.c = c
self.is_star = False
def match(self, c):
return self.c == '.' or self.c == c
class Solution(object):
<|fim_middle|>
<|fim▁end|> | def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1]
else:
if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1]
return dp[-1][-1] |
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def __init__(self, c):
self.c = c
self.is_star = False
def match(self, c):
return self.c == '.' or self.c == c
class Solution(object):
def isMatch(self, s, p):
<|fim_middle|>
<|fim▁end|> | """
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1]
else:
if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1]
return dp[-1][-1] |
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def __init__(self, c):
self.c = c
self.is_star = False
def match(self, c):
return self.c == '.' or self.c == c
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
<|fim_middle|>
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1]
else:
if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1]
return dp[-1][-1]
<|fim▁end|> | rs[-1].is_star = True |
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def __init__(self, c):
self.c = c
self.is_star = False
def match(self, c):
return self.c == '.' or self.c == c
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
<|fim_middle|>
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1]
else:
if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1]
return dp[-1][-1]
<|fim▁end|> | rs.append(R(c)) |
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def __init__(self, c):
self.c = c
self.is_star = False
def match(self, c):
return self.c == '.' or self.c == c
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
<|fim_middle|>
else:
if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1]
return dp[-1][-1]
<|fim▁end|> | dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1] |
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def __init__(self, c):
self.c = c
self.is_star = False
def match(self, c):
return self.c == '.' or self.c == c
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
<|fim_middle|>
else:
if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1]
return dp[-1][-1]
<|fim▁end|> | dp[i + 1][j] |= dp[i + 1][j - 1] |
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def __init__(self, c):
self.c = c
self.is_star = False
def match(self, c):
return self.c == '.' or self.c == c
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1]
else:
<|fim_middle|>
return dp[-1][-1]
<|fim▁end|> | if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1] |
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def __init__(self, c):
self.c = c
self.is_star = False
def match(self, c):
return self.c == '.' or self.c == c
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1]
else:
if j and r.match(c):
<|fim_middle|>
return dp[-1][-1]
<|fim▁end|> | dp[i + 1][j] = dp[i][j - 1] |
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def <|fim_middle|>(self, c):
self.c = c
self.is_star = False
def match(self, c):
return self.c == '.' or self.c == c
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1]
else:
if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1]
return dp[-1][-1]
<|fim▁end|> | __init__ |
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def __init__(self, c):
self.c = c
self.is_star = False
def <|fim_middle|>(self, c):
return self.c == '.' or self.c == c
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1]
else:
if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1]
return dp[-1][-1]
<|fim▁end|> | match |
<|file_name|>regular_expression_matching.py<|end_file_name|><|fim▁begin|>class R:
def __init__(self, c):
self.c = c
self.is_star = False
def match(self, c):
return self.c == '.' or self.c == c
class Solution(object):
def <|fim_middle|>(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
rs = []
""":type: list[R]"""
for c in p:
if c == '*':
rs[-1].is_star = True
else:
rs.append(R(c))
lr = len(rs)
ls = len(s)
s += '\0'
dp = [[False] * (ls + 1) for _ in range(lr + 1)]
dp[0][0] = True
for i, r in enumerate(rs):
for j in range(ls + 1):
c = s[j - 1]
if r.is_star:
dp[i + 1][j] = dp[i][j]
if j and r.match(c):
dp[i + 1][j] |= dp[i + 1][j - 1]
else:
if j and r.match(c):
dp[i + 1][j] = dp[i][j - 1]
return dp[-1][-1]
<|fim▁end|> | isMatch |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'<|fim▁hole|>
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)<|fim▁end|> | SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test' |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
<|fim_middle|>
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
<|fim▁end|> | Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
<|fim_middle|>
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
<|fim▁end|> | mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches() |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
<|fim_middle|>
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
<|fim▁end|> | try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
<|fim_middle|>
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
<|fim▁end|> | assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
<|fim_middle|>
<|fim▁end|> | root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span) |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
<|fim_middle|>
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
<|fim▁end|> | assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
<|fim_middle|>
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
<|fim▁end|> | assert span.parent_id is None |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def <|fim_middle|>():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
<|fim▁end|> | session |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def <|fim_middle|>():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
<|fim▁end|> | patch_sqlalchemy |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def <|fim_middle|>():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
<|fim▁end|> | is_mysql_running |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def <|fim_middle|>(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
<|fim▁end|> | assert_span |
<|file_name|>test_mysqldb.py<|end_file_name|><|fim▁begin|>import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test'
@pytest.fixture
def session():
Session = sessionmaker()
engine = create_engine(MYSQL_CONNECTION_STRING)
Session.configure(bind=engine)
metadata.create_all(engine)
try:
yield Session()
except:
pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
mysqldb_hooks.install_patches()
try:
yield
finally:
mysqldb_hooks.reset_patches()
def is_mysql_running():
try:
import MySQLdb
with MySQLdb.connect(host='127.0.0.1', user='root'):
pass
return True
except:
return False
def assert_span(span, operation, parent=None):
assert span.operation_name == 'MySQLdb:' + operation
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
if parent:
assert span.parent_id == parent.context.span_id
assert span.context.trace_id == parent.context.trace_id
else:
assert span.parent_id is None
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def <|fim_middle|>(tracer, session):
root_span = tracer.start_span('root-span')
# span recording works for regular operations within a context only
with span_in_context(root_span):
user = User(name='user', fullname='User', password='password')
session.add(user)
session.commit()
spans = tracer.recorder.get_spans()
assert len(spans) == 4
connect_span, insert_span, commit_span, rollback_span = spans
assert_span(connect_span, 'Connect')
assert_span(insert_span, 'INSERT', root_span)
assert_span(commit_span, 'commit', root_span)
assert_span(rollback_span, 'rollback', root_span)
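The test rows in this file all gate on the same pattern: probe for an external service once at collection time, then mark the test with `pytest.mark.skipif` when the service is unreachable. A minimal self-contained sketch of that pattern, assuming SQLite in place of MySQL so it runs without a server (all names here are illustrative, not from the dataset):

```python
import sqlite3

import pytest


def is_db_available():
    # Probe the backing store once; any failure means "skip the test".
    try:
        with sqlite3.connect(':memory:'):
            pass
        return True
    except Exception:
        return False


@pytest.mark.skipif(not is_db_available(), reason='database not available')
def test_roundtrip():
    with sqlite3.connect(':memory:') as conn:
        conn.execute('CREATE TABLE users (name TEXT)')
        conn.execute("INSERT INTO users VALUES ('user')")
        rows = conn.execute('SELECT name FROM users').fetchall()
    assert rows == [('user',)]
```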
<|fim▁end|> | test_db |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
<|fim▁hole|> stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))<|fim▁end|> | for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs |
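The `eval_with_funcs` rows implement a fan-out/fan-in pattern: worker threads play episodes and push scores onto a shared queue while the main thread drains it and aggregates statistics. A minimal self-contained sketch of the same pattern, with a random number standing in for an episode score (the function and parameter names are illustrative assumptions):

```python
import queue
import random
import threading


def eval_with_workers(n_workers=4, episodes_per_worker=25):
    # Fan-out: each worker "plays" its episodes and reports scores.
    q = queue.Queue()

    def worker():
        for _ in range(episodes_per_worker):
            score = random.random()  # stand-in for play_one_episode(...)
            q.put(score)

    threads = [threading.Thread(target=worker) for _ in range(n_workers)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    # Fan-in: drain the queue and aggregate, as the StatCounter does above.
    scores = []
    while not q.empty():
        scores.append(q.get())
    return sum(scores) / len(scores), max(scores)


if __name__ == '__main__':
    mean, best = eval_with_workers()
    print('mean={:.3f} max={:.3f}'.format(mean, best))
```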
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
<|fim_middle|>
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f)) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
<|fim_middle|>
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
<|fim_middle|>
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
<|fim_middle|>
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
<|fim_middle|>
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
<|fim_middle|>
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | super(Worker, self).__init__()
self._func = func
self.q = queue |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
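        # take a random action with small probability (0.001) so evaluation is not fully deterministic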
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
<|fim_middle|>
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
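        # take a random action with small probability (0.001) so evaluation is not fully deterministic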
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
<|fim_middle|>
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
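        # take a random action with small probability (0.001) so evaluation is not fully deterministic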
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
<|fim_middle|>
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max)) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
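        # take a random action with small probability (0.001) so evaluation is not fully deterministic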
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
<|fim_middle|>
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
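        # take a random action with small probability (0.001) so evaluation is not fully deterministic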
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
<|fim_middle|>
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
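        # take a random action with small probability (0.001) so evaluation is not fully deterministic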
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
<|fim_middle|>
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
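        # take a random action with small probability (0.001) so evaluation is not fully deterministic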
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
<|fim_middle|>
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
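        # take a random action with small probability (0.001) so evaluation is not fully deterministic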
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
<|fim_middle|>
<|fim▁end|> | logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score)) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
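        # take a random action with small probability (0.001) so evaluation is not fully deterministic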
if random.random() < 0.001:
<|fim_middle|>
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | act = spc.sample() |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
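        # take a random action with small probability (0.001) so evaluation is not fully deterministic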
if random.random() < 0.001:
act = spc.sample()
if verbose:
<|fim_middle|>
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | print(act) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
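        # take a random action with small probability (0.001) so evaluation is not fully deterministic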
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
<|fim_middle|>
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | raise RuntimeError("stopped!") |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
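        # take a random action with small probability (0.001) so evaluation is not fully deterministic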
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
<|fim_middle|>
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | return (stat.average, stat.max) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
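        # take a random action with small probability (0.001) so evaluation is not fully deterministic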
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
<|fim_middle|>
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | self.eval_episode = int(self.eval_episode * 0.94) |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
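        # take a random action with small probability (0.001) so evaluation is not fully deterministic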
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
<|fim_middle|>
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | player.restart_episode() |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def <|fim_middle|>(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
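        # take a random action with small probability (0.001) so evaluation is not fully deterministic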
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | play_one_episode |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def <|fim_middle|>(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
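        # take a random action with small probability (0.001) so evaluation is not fully deterministic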
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | f |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
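        # take a random action with small probability (0.001) so evaluation is not fully deterministic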
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def <|fim_middle|>(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | play_model |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
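        # take a random action with small probability (0.001) so evaluation is not fully deterministic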
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def <|fim_middle|>(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
    except Exception:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | eval_with_funcs |
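# Structurally, eval_with_funcs above is N producer threads feeding a single
# result queue whose consumer aggregates a fixed budget of scores. A
# stdlib-only sketch of that data flow; the tensorpack thread helpers and the
# game player are replaced by stubs, and every name here is illustrative.
import threading
import statistics
import time as _time
import queue as _queue

def gather_scores(score_fn, nr_workers, nr_results):
    q = _queue.Queue()

    def producer():
        while True:              # daemon workers run until the process exits
            q.put(score_fn())
            _time.sleep(0.001)   # stand-in for the cost of one episode

    for _ in range(nr_workers):
        threading.Thread(target=producer, daemon=True).start()
    scores = [q.get() for _ in range(nr_results)]  # fixed evaluation budget
    return statistics.mean(scores), max(scores)

mean_score, max_score = gather_scores(lambda: 1.0, nr_workers=4, nr_results=8)
assert (mean_score, max_score) == (1.0, 1.0)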
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def <|fim_middle|>(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | __init__ |
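# The policy wrapper inside play_one_episode is greedy on the predicted
# values except for a 0.1% chance of a uniformly random action, which keeps a
# deterministic agent from getting stuck. The same idea in isolation;
# q_values_fn and sample_random stand in for the predictor and action space.
import random as _random

def make_policy(q_values_fn, sample_random, epsilon=0.001):
    def act(state):
        q = q_values_fn(state)
        action = max(range(len(q)), key=q.__getitem__)  # argmax without numpy
        if _random.random() < epsilon:
            action = sample_random()                    # rare exploration step
        return action
    return act

policy = make_policy(lambda s: [0.1, 0.7, 0.2], lambda: 0)
assert policy(None) in (0, 1)  # action 1 almost always, 0 on the rare sample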
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def <|fim_middle|>(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | func |
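# The shutdown path in eval_with_funcs stops every worker, joins them, and
# only then drains what is left in the queue; checking the queue size is only
# trustworthy once no producer is alive. A reduced sketch of that ordering
# with a plain Event in place of the StoppableThread machinery.
import queue as _q
import threading as _th
import time as _tm

stop = _th.Event()
results_q = _q.Queue()

def _worker():
    while not stop.is_set():
        results_q.put(1.0)
        _tm.sleep(0.01)

workers = [_th.Thread(target=_worker) for _ in range(2)]
for w in workers:
    w.start()
_tm.sleep(0.05)

stop.set()                    # 1. signal every worker to stop
for w in workers:
    w.join()                  # 2. wait until each has really exited
drained = []
while not results_q.empty():  # 3. only now is draining by size safe
    drained.append(results_q.get())
assert drained                # scores produced before the stop are kept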
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def <|fim_middle|>(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | run |
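# A detail worth noting in _setup_graph above: [pred] * NR_PROC builds
# NR_PROC references to the same predictor object, not NR_PROC copies, so
# every worker thread shares one callable; presumably safe here because the
# underlying session handles concurrent calls. The list behaviour itself:
pred = object()                       # stand-in for a predictor
funcs = [pred] * 4
assert all(f is pred for f in funcs)  # four aliases, one object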
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def <|fim_middle|>(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | eval_model_multithread |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def <|fim_middle|>(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | __init__ |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def <|fim_middle|>(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | _setup_graph |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def <|fim_middle|>(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | _trigger |
<|file_name|>common.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def <|fim_middle|>(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
<|fim▁end|> | play_n_episodes |
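# Both call sites size the worker pool as half the CPU cores with a cap
# (8 for offline evaluation, 20 inside the trainer). A sketch of that rule;
# note that on a single-core machine cpu_count() // 2 is 0, a case the
# original code does not guard against.
import multiprocessing as _mp

def n_eval_workers(cap):
    return min(_mp.cpu_count() // 2, cap)

assert 0 <= n_eval_workers(8) <= 8
assert n_eval_workers(20) <= 20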
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""<|fim▁hole|>
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)<|fim▁end|> | self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback) |
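# The classes above are meant to decorate plugin callbacks: registration
# happens at decoration time and AutoUnload reverses it when the plugin is
# unloaded. A self-contained toy version of that flow (no Source.Python; the
# message name is invented). Note that __call__ returns nothing, so the
# decorated name is rebound to None, exactly as in hooks.py; the hook object
# and the manager keep the real reference.
_toy_hooks = {}

class ToyMessageHook:
    def __init__(self, message_name):
        self.message_name = message_name
        self.callback = None
    def __call__(self, callback):
        self.callback = callback
        _toy_hooks.setdefault(self.message_name, []).append(callback)

@ToyMessageHook('LevelUp')   # hypothetical message name
def on_level_up(message):
    return message.upper()

assert on_level_up is None   # rebound by the decorator, as in the real class
assert _toy_hooks['LevelUp'][0]('gg') == 'GG'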
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
<|fim_middle|>
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | """Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback) |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
<|fim_middle|>
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | """Store the message name."""
self.message_name = message_name
self.callback = None |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
<|fim_middle|>
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | """Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback) |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
<|fim_middle|>
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | """Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback) |
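# The unload half is symmetric: _unload_instance hands the same
# (identifier, callback) pair back to the manager, so hooking and unhooking
# must agree on the key. A toy prefix hook showing both halves; the dict
# stands in for the real message_manager.
_toy_prefix_hooks = {}

class ToyPrefixHook:
    def __init__(self, message_prefix):
        self.message_prefix = message_prefix
        self.callback = None
    def __call__(self, callback):
        self.callback = callback
        _toy_prefix_hooks[self.message_prefix] = callback
    def _unload_instance(self):
        # Must mirror __call__ exactly, or a stale callback outlives the plugin.
        del _toy_prefix_hooks[self.message_prefix]

hook = ToyPrefixHook('Leader_')  # hypothetical prefix
hook(lambda msg: msg)
assert 'Leader_' in _toy_prefix_hooks
hook._unload_instance()
assert 'Leader_' not in _toy_prefix_hooks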
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
<|fim_middle|>
<|fim▁end|> | """Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback) |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
<|fim_middle|>
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | """Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
<|fim_middle|>
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | """Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback) |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
<|fim_middle|>
<|fim▁end|> | """Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback) |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def <|fim_middle|>(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | __init__ |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def <|fim_middle|>(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | __call__ |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def <|fim_middle|>(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | _unload_instance |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def <|fim_middle|>(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | __init__ |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def <|fim_middle|>(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | __call__ |
<|file_name|>hooks.py<|end_file_name|><|fim▁begin|># ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def <|fim_middle|>(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
<|fim▁end|> | _unload_instance |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while ptr[idx] != 0:
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
b'Hello, world!'
"""<|fim▁hole|>try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass<|fim▁end|> | return ctypes.c_char_p(string.encode('utf-8'))
"""pandas""" |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
<|fim_middle|>
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while ptr[idx] != 0:
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
b'Hello, world!'
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | """Error thrown by xlearn trainer"""
pass |
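# _check_call exists so every raw C call can be wrapped at the call site,
# turning a nonzero status into a Python exception. The same pattern with
# the shared library replaced by a pure-Python stub; all names here are
# illustrative, not part of the xlearn C API.
class _FakeLib:
    @staticmethod
    def do_work(ok):
        return 0 if ok else -1  # C-style status code

def _check_status(ret):
    if ret != 0:
        raise RuntimeError("C API call failed with status %d" % ret)

_check_status(_FakeLib.do_work(True))       # status 0 passes silently
try:
    _check_status(_FakeLib.do_work(False))  # status -1 becomes an exception
except RuntimeError as err:
    assert "status -1" in str(err)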
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
<|fim_middle|>
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while ptr[idx] != 0:
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
b'Hello, world!'
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | """Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib |
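Note that _load_lib returns None when find_lib_path() comes back empty, so a missing shared object only surfaces later as an AttributeError on _LIB. A fail-fast variant is a reasonable alternative; this is a sketch under that assumption, not the library's actual behaviour:

    def _load_lib_strict():
        """Load the xlearn shared library, raising immediately when it is missing."""
        lib_path = find_lib_path()
        if not lib_path:
            raise XLearnError("cannot find the xlearn shared library")
        return ctypes.cdll.LoadLibrary(lib_path[0])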
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
<|fim_middle|>
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | """Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg) |
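The byte-by-byte loop in _check_call re-implements C-string decoding by hand; ctypes can do the same in one step by casting the POINTER(c_ubyte) result to c_char_p. An equivalent sketch (the helper name is illustrative):

    def _last_error_message():
        # cast() reinterprets the ubyte pointer as a NUL-terminated C string.
        _LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
        ptr = _LIB.XLearnGetLastError()
        return ctypes.cast(ptr, ctypes.c_char_p).value.decode("utf-8")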
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
<|fim_middle|>
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | """Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string) |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
<|fim_middle|>
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | """Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8')) |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
<|fim_middle|>
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | """Dummy class for pandas.Series."""
pass |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
<|fim_middle|>
<|fim▁end|> | """Dummy class for pandas.DataFrame."""
pass |
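The dummy Series and DataFrame classes exist so that isinstance checks elsewhere in the package stay valid without a hard pandas dependency: against the dummies they simply always return False. A small sketch of the calling pattern this enables (the function name is illustrative):

    def _maybe_to_array(data):
        # With real pandas installed this converts; with the dummy class it is a no-op.
        if isinstance(data, DataFrame):
            return data.values
        return data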
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
<|fim_middle|>
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | return None |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
<|fim_middle|>
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg) |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
<|fim_middle|>
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string) |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
<|fim_middle|>
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8')) |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def <|fim_middle|>():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | _load_lib |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def <|fim_middle|>(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | _check_call |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def <|fim_middle|>(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | c_str |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise an exception when an error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def <|fim_middle|>(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
<|fim▁end|> | c_str |
<|file_name|>q2_sigmoid.py<|end_file_name|><|fim▁begin|>import numpy as np
def sigmoid(x):
"""
Compute the sigmoid function for the input here.
"""
x = 1. / (1. + np.exp(-x))
return x
def sigmoid_grad(f):
"""
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f * (1. - f)
return f
def test_sigmoid_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
print "You should verify these results!\n"
def test_sigmoid():
"""
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
<|fim▁hole|>if __name__ == "__main__":
test_sigmoid_basic();
#test_sigmoid()<|fim▁end|> | |
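For large negative inputs, np.exp(-x) in the reference sigmoid overflows to inf (NumPy emits a RuntimeWarning, although the result still rounds to the correct 0). A numerically quieter variant splits the two sign branches; this is a sketch assuming array input, not part of the assignment:

    import numpy as np

    def sigmoid_stable(x):
        x = np.asarray(x, dtype=float)
        out = np.empty_like(x)
        pos = x >= 0
        out[pos] = 1. / (1. + np.exp(-x[pos]))   # exponent <= 0: no overflow
        ex = np.exp(x[~pos])                     # x < 0 here, so exponent < 0
        out[~pos] = ex / (1. + ex)
        return out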
<|file_name|>q2_sigmoid.py<|end_file_name|><|fim▁begin|>import numpy as np
def sigmoid(x):
<|fim_middle|>
def sigmoid_grad(f):
"""
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f * (1. - f)
return f
def test_sigmoid_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
print "You should verify these results!\n"
def test_sigmoid():
"""
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
test_sigmoid_basic();
#test_sigmoid()
<|fim▁end|> | """
Compute the sigmoid function for the input here.
"""
x = 1. / (1. + np.exp(-x))
return x |
<|file_name|>q2_sigmoid.py<|end_file_name|><|fim▁begin|>import numpy as np
def sigmoid(x):
"""
Compute the sigmoid function for the input here.
"""
x = 1. / (1. + np.exp(-x))
return x
def sigmoid_grad(f):
<|fim_middle|>
def test_sigmoid_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
print "You should verify these results!\n"
def test_sigmoid():
"""
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
test_sigmoid_basic();
#test_sigmoid()
<|fim▁end|> | """
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f * (1. - f)
return f |
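The identity implemented by sigmoid_grad follows directly from the definition: with f = sigmoid(x) = 1/(1 + exp(-x)),

    d/dx sigmoid(x) = exp(-x) / (1 + exp(-x))^2
                    = [1 / (1 + exp(-x))] * [exp(-x) / (1 + exp(-x))]
                    = f * (1 - f)

which is why the function takes the already-computed sigmoid value f rather than the original input x.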
<|file_name|>q2_sigmoid.py<|end_file_name|><|fim▁begin|>import numpy as np
def sigmoid(x):
"""
Compute the sigmoid function for the input here.
"""
x = 1. / (1. + np.exp(-x))
return x
def sigmoid_grad(f):
"""
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f * (1. - f)
return f
def test_sigmoid_basic():
<|fim_middle|>
def test_sigmoid():
"""
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
test_sigmoid_basic();
#test_sigmoid()
<|fim▁end|> | """
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
print "You should verify these results!\n" |
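A central-difference check is one compact way to fill in the test_sigmoid stub, assuming sigmoid and sigmoid_grad from this file are in scope; a sketch, not the graded solution:

    import numpy as np

    def test_sigmoid_fd():
        x = np.random.randn(4, 5)
        h = 1e-5
        numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)  # central difference
        analytic = sigmoid_grad(sigmoid(x))
        assert np.amax(np.abs(numeric - analytic)) <= 1e-8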
<|file_name|>q2_sigmoid.py<|end_file_name|><|fim▁begin|>import numpy as np
def sigmoid(x):
"""
Compute the sigmoid function for the input here.
"""
x = 1. / (1. + np.exp(-x))
return x
def sigmoid_grad(f):
"""
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f * (1. - f)
return f
def test_sigmoid_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
print "You should verify these results!\n"
def test_sigmoid():
<|fim_middle|>
if __name__ == "__main__":
test_sigmoid_basic();
#test_sigmoid()
<|fim▁end|> | """
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE |
<|file_name|>q2_sigmoid.py<|end_file_name|><|fim▁begin|>import numpy as np
def sigmoid(x):
"""
Compute the sigmoid function for the input here.
"""
x = 1. / (1. + np.exp(-x))
return x
def sigmoid_grad(f):
"""
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f * (1. - f)
return f
def test_sigmoid_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
print "You should verify these results!\n"
def test_sigmoid():
"""
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
<|fim_middle|>
<|fim▁end|> | test_sigmoid_basic();
#test_sigmoid() |
<|file_name|>q2_sigmoid.py<|end_file_name|><|fim▁begin|>import numpy as np
def <|fim_middle|>(x):
"""
Compute the sigmoid function for the input here.
"""
x = 1. / (1. + np.exp(-x))
return x
def sigmoid_grad(f):
"""
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f * (1. - f)
return f
def test_sigmoid_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
print "You should verify these results!\n"
def test_sigmoid():
"""
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
test_sigmoid_basic();
#test_sigmoid()
<|fim▁end|> | sigmoid |
<|file_name|>q2_sigmoid.py<|end_file_name|><|fim▁begin|>import numpy as np
def sigmoid(x):
"""
Compute the sigmoid function for the input here.
"""
x = 1. / (1. + np.exp(-x))
return x
def <|fim_middle|>(f):
"""
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f * (1. - f)
return f
def test_sigmoid_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
print "You should verify these results!\n"
def test_sigmoid():
"""
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
test_sigmoid_basic();
#test_sigmoid()
<|fim▁end|> | sigmoid_grad |
<|file_name|>q2_sigmoid.py<|end_file_name|><|fim▁begin|>import numpy as np
def sigmoid(x):
"""
Compute the sigmoid function for the input here.
"""
x = 1. / (1. + np.exp(-x))
return x
def sigmoid_grad(f):
"""
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f * (1. - f)
return f
def <|fim_middle|>():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
print "You should verify these results!\n"
def test_sigmoid():
"""
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
test_sigmoid_basic();
#test_sigmoid()
<|fim▁end|> | test_sigmoid_basic |
<|file_name|>q2_sigmoid.py<|end_file_name|><|fim▁begin|>import numpy as np
def sigmoid(x):
"""
Compute the sigmoid function for the input here.
"""
x = 1. / (1. + np.exp(-x))
return x
def sigmoid_grad(f):
"""
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f * (1. - f)
return f
def test_sigmoid_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
print "You should verify these results!\n"
def <|fim_middle|>():
"""
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
test_sigmoid_basic();
#test_sigmoid()
<|fim▁end|> | test_sigmoid |
<|file_name|>generi_in_istituzioni.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from collections import OrderedDict
import locale
from optparse import make_option
from verify.management.commands import VerifyBaseCommand
from verify.models import *
from verify.politici_models import *
from django.db.models import Q, Count
__author__ = 'guglielmo'
class Command(VerifyBaseCommand):
"""
Report of overall gender statistics, at the national level,
for all organs of all institutions.
Can be limited to one or more institutions by passing a list of institution_id values.
"""
args = '<institution_id institution_id ...>'
help = "Report gender statistics by institution and charge type (counts and percentages of women and men)."
option_list = VerifyBaseCommand.option_list<|fim▁hole|>
institutions = OpInstitution.objects.using('politici').all()
if args:
institutions = institutions.filter(id__in=args)
self.logger.info(
"Verification {0} launched with institutions limited to {1}".format(
self.__class__.__module__, ",".join(institutions.values_list('id', flat=True))
)
)
else:
self.logger.info(
"Verification {0} launched for all institutions".format(
self.__class__.__module__
)
)
self.ok_locs = []
self.ko_locs = []
for institution in institutions:
charge_types_ids = OpInstitutionCharge.objects.using('politici').\
filter(date_end__isnull=True,
content__deleted_at__isnull=True).\
filter(institution=institution).\
values_list('charge_type', flat=True).\
distinct()
charge_types = OpChargeType.objects.using('politici').\
filter(id__in=charge_types_ids)
for charge_type in charge_types:
self.logger.info(
"Counting {0} in {1}".format(
charge_type.name, institution.name
)
)
qs = OpInstitutionCharge.objects.using('politici').\
filter(date_end__isnull=True,
content__deleted_at__isnull=True).\
filter(institution=institution,
charge_type=charge_type)
n_tot = qs.count()
n_fem = qs.filter(politician__sex__iexact='f').count()
n_mal = n_tot - n_fem
merged = [institution.name, charge_type.name, n_fem, n_mal, n_tot,]
merged.append(locale.format("%.2f",100. * n_fem / float(n_tot) ))
merged.append(locale.format("%.2f",100. * n_mal / float(n_tot) ))
self.ko_locs.append(merged)
outcome = Verification.OUTCOME.failed
self.logger.info(
"Report for {0} institutions generated.".format(
len(self.ko_locs)
)
)
return outcome<|fim▁end|> |
def execute_verification(self, *args, **options):
self.csv_headers = ["ISTITUZIONE", "INCARICO", "N_DONNE", "N_UOMINI", "N_TOTALI", "PERC_DONNE", "PERC_UOMINI"] |
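Two fragile points in the percentage lines above: n_tot is non-zero only because charge_types is derived from the same queryset filters, and locale.format is deprecated in modern Python in favour of locale.format_string. A defensive sketch of the same computation (the helper is illustrative, not part of the original command):

    def pct(n, tot):
        # Guard the division so a diverging filter cannot raise ZeroDivisionError.
        if not tot:
            return "n/a"
        return locale.format_string("%.2f", 100. * n / float(tot))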