content | type
---|---
from .base import *
DEBUG = False
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'dist/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats-prod.json'),
}
}
| python |
from src.pybitbucket.bitbucket import Bitbucket
config = {
"secret-properties": "secretproperties.properties",
"properties": "properties.properties"}
bb = Bitbucket(settings=config)
# workspace = bb.workspace
prs_df = bb.df_prs
commits_df = bb.df_commits
prs_list = sorted(prs_df["pr_id"].unique().tolist())  # list.sort() sorts in place and returns None
print(f"PRs: {prs_list}")
| python |
#!/usr/bin/env python
'''
Tiger
'''
import json
import os
import subprocess
from collections import OrderedDict
from tasks.util import (LoadPostgresFromURL, classpath, TempTableTask, grouper,
shell, TableTask, ColumnsTask, TagsTask,
Carto2TempTableTask)
from tasks.meta import (OBSColumn, GEOM_REF, GEOM_NAME, OBSTag, current_session)
from tasks.tags import SectionTags, SubsectionTags, LicenseTags, BoundaryTags
from luigi import (Task, WrapperTask, Parameter, LocalTarget, IntParameter)
from decimal import Decimal
class TigerSourceTags(TagsTask):
def version(self):
return 1
def tags(self):
return [
OBSTag(id='tiger-source',
name='US Census TIGER/Line Shapefiles',
type='source',
description='`TIGER/Line Shapefiles <https://www.census.gov/geo/maps-data/data/tiger-line.html>`_')
]
class ClippedGeomColumns(ColumnsTask):
def version(self):
return 13
def requires(self):
return {
'geom_columns': GeomColumns(),
'sections': SectionTags(),
'subsections': SubsectionTags(),
'source': TigerSourceTags(),
'license': LicenseTags(),
'boundary':BoundaryTags(),
}
def columns(self):
cols = OrderedDict()
session = current_session()
input_ = self.input()
sections = input_['sections']
subsections = input_['subsections']
source = input_['source']['tiger-source']
license = input_['license']['no-restrictions']
boundary_type = input_['boundary']
for colname, coltarget in self.input()['geom_columns'].iteritems():
col = coltarget.get(session)
cols[colname + '_clipped'] = OBSColumn(
type='Geometry',
name='Shoreline clipped ' + col.name,
weight=Decimal(col.weight) + Decimal(0.01),
description='A cartography-ready version of {name}'.format(
name=col.name),
targets={col: 'cartography'},
tags=[sections['united_states'],
subsections['boundary'],
source, license]
)
interpolated_boundaries = ['block_clipped', 'block_group_clipped',
'puma_clipped','census_tract_clipped',
'county_clipped','state_clipped']
cartographic_boundaries = ['cbsa_clipped',
'school_district_elementary_clipped',
'place_clipped',
'school_district_secondary_clipped',
'zcta5_clipped',
'congressional_district_clipped',
'school_district_unified_clipped',
'block_clipped', 'block_group_clipped',
'puma_clipped','census_tract_clipped',
'county_clipped','state_clipped']
for colname, col in cols.iteritems():
if colname in interpolated_boundaries:
col.tags.append(boundary_type['interpolation_boundary'])
if colname in cartographic_boundaries:
col.tags.append(boundary_type['cartographic_boundary'])
return cols
class GeomColumns(ColumnsTask):
def version(self):
return 15
def requires(self):
return {
'sections': SectionTags(),
'subsections': SubsectionTags(),
'source': TigerSourceTags(),
'license': LicenseTags(),
'boundary': BoundaryTags(),
}
def _generate_desc(self, sumlevel):
'''
Add figure to the description
'''
return SUMLEVELS_BY_SLUG[sumlevel]['census_description']
def columns(self):
input_ = self.input()
sections = input_['sections']
subsections = input_['subsections']
source = input_['source']['tiger-source']
license = input_['license']['no-restrictions']
columns = {
'block_group': OBSColumn(
type='Geometry',
name='US Census Block Groups',
description=self._generate_desc("block_group"),
weight=10,
tags=[sections['united_states'], subsections['boundary']]
),
'block': OBSColumn(
type='Geometry',
name='US Census Blocks',
description=self._generate_desc("block"),
weight=0,
tags=[sections['united_states'], subsections['boundary']]
),
'census_tract': OBSColumn(
type='Geometry',
name='US Census Tracts',
description=self._generate_desc("census_tract"),
weight=9,
tags=[sections['united_states'], subsections['boundary']]
),
'congressional_district': OBSColumn(
type='Geometry',
name='US Congressional Districts',
description=self._generate_desc("congressional_district"),
weight=5.4,
tags=[sections['united_states'], subsections['boundary']]
),
'county': OBSColumn(
type='Geometry',
name='US County',
description=self._generate_desc("county"),
weight=7,
tags=[sections['united_states'], subsections['boundary']]
),
'puma': OBSColumn(
type='Geometry',
name='US Census Public Use Microdata Areas',
description=self._generate_desc("puma"),
weight=5.5,
tags=[sections['united_states'], subsections['boundary']]
),
'state': OBSColumn(
type='Geometry',
name='US States',
description=self._generate_desc("state"),
weight=8,
tags=[sections['united_states'], subsections['boundary']]
),
'zcta5': OBSColumn(
type='Geometry',
name='US Census Zip Code Tabulation Areas',
description=self._generate_desc('zcta5'),
weight=6,
tags=[sections['united_states'], subsections['boundary']]
),
'school_district_elementary': OBSColumn(
type='Geometry',
name='Elementary School District',
description=self._generate_desc('school_district_elementary'),
weight=2.8,
tags=[sections['united_states'], subsections['boundary']]
),
'school_district_secondary': OBSColumn(
type='Geometry',
name='Secondary School District',
description=self._generate_desc('school_district_secondary'),
weight=2.9,
tags=[sections['united_states'], subsections['boundary']]
),
'school_district_unified': OBSColumn(
type='Geometry',
name='Unified School District',
description=self._generate_desc('school_district_unified'),
weight=5,
tags=[sections['united_states'], subsections['boundary']]
),
'cbsa': OBSColumn(
type='Geometry',
name='Core Based Statistical Area (CBSA)',
description=self._generate_desc("cbsa"),
weight=1,
tags=[sections['united_states'], subsections['boundary']]
),
'place': OBSColumn(
type='Geometry',
name='Incorporated Places',
description=self._generate_desc("place"),
weight=1.1,
tags=[sections['united_states'], subsections['boundary']]
),
}
for _,col in columns.iteritems():
col.tags.append(source)
col.tags.append(license)
return columns
class Attributes(ColumnsTask):
def version(self):
return 2
def requires(self):
return SectionTags()
def columns(self):
return OrderedDict([
('aland', OBSColumn(
type='Numeric',
name='Land area',
aggregate='sum',
weight=0,
)),
('awater', OBSColumn(
type='Numeric',
name='Water area',
aggregate='sum',
weight=0,
)),
])
class GeoidColumns(ColumnsTask):
def version(self):
return 6
def requires(self):
return {
'raw': GeomColumns(),
'clipped': ClippedGeomColumns()
}
def columns(self):
cols = OrderedDict()
clipped = self.input()['clipped']
for colname, coltarget in self.input()['raw'].iteritems():
col = coltarget._column
cols[colname + '_geoid'] = OBSColumn(
type='Text',
name=col.name + ' Geoids',
weight=0,
targets={
col: GEOM_REF,
clipped[colname + '_clipped']._column: GEOM_REF
}
)
return cols
class GeonameColumns(ColumnsTask):
def version(self):
return 2
def requires(self):
return {
'raw': GeomColumns(),
'clipped': ClippedGeomColumns(),
'subsections': SubsectionTags(),
'sections':SectionTags(),
}
def columns(self):
cols = OrderedDict()
clipped = self.input()['clipped']
subsection = self.input()['subsections']
sections = self.input()['sections']
for colname, coltarget in self.input()['raw'].iteritems():
col = coltarget._column
cols[colname + '_geoname'] = OBSColumn(
type='Text',
name=col.name + ' Proper Name',
weight=1,
tags=[subsection['names'],sections['united_states']],
targets={
col: GEOM_NAME,
clipped[colname + '_clipped']._column: GEOM_NAME
}
)
return cols
class DownloadTigerGeography(Task):
year = IntParameter()
geography = Parameter()
url_format = 'ftp://ftp2.census.gov/geo/tiger/TIGER{year}/{geography}/'
@property
def url(self):
return self.url_format.format(year=self.year, geography=self.geography)
@property
def directory(self):
return os.path.join('tmp', classpath(self), str(self.year))
def run(self):
shell('wget --recursive --continue --accept=*.zip '
'--no-parent --cut-dirs=3 --no-host-directories '
'--directory-prefix={directory} '
'{url}'.format(directory=self.directory, url=self.url))
def output(self):
filenames = shell('ls {}'.format(os.path.join(
self.directory, self.geography, '*.zip'))).split('\n')
for path in filenames:
yield LocalTarget(path)
def complete(self):
try:
exists = shell('ls {}'.format(os.path.join(self.directory, self.geography, '*.zip')))
return exists != ''
except subprocess.CalledProcessError:
return False
class UnzipTigerGeography(Task):
'''
Unzip tiger geography
'''
year = Parameter()
geography = Parameter()
def requires(self):
return DownloadTigerGeography(year=self.year, geography=self.geography)
@property
def directory(self):
return os.path.join('tmp', classpath(self), str(self.year), self.geography)
def run(self):
#for infile in self.input():
cmd = "cd {path} && find -iname '*.zip' -print0 | xargs -0 -n1 unzip -n -q ".format(
path=self.directory)
shell(cmd)
def output(self):
        shps = shell('ls {}'.format(os.path.join(self.directory, '*.shp'))).split('\n')
        for path in shps:
            yield LocalTarget(path)
def complete(self):
try:
exists = shell('ls {}'.format(os.path.join(self.directory, '*.shp')))
return exists != ''
except subprocess.CalledProcessError:
return False
class TigerGeographyShapefileToSQL(TempTableTask):
'''
Take downloaded shapefiles and load them into Postgres
'''
year = Parameter()
geography = Parameter()
def requires(self):
return UnzipTigerGeography(year=self.year, geography=self.geography)
def run(self):
shapefiles = shell('ls {dir}/*.shp'.format(
dir=os.path.join('tmp', classpath(self), str(self.year), self.geography)
)).strip().split('\n')
cmd = 'ogrinfo {shpfile_path}'.format(shpfile_path=shapefiles[0])
resp = shell(cmd)
if 'Polygon' in resp:
nlt = '-nlt MultiPolygon'
else:
nlt = ''
cmd = 'PG_USE_COPY=yes PGCLIENTENCODING=latin1 ' \
'ogr2ogr -f PostgreSQL "PG:dbname=$PGDATABASE active_schema={schema}" ' \
'-t_srs "EPSG:4326" {nlt} -nln {tablename} ' \
'-lco OVERWRITE=yes ' \
'-lco SCHEMA={schema} {shpfile_path} '.format(
tablename=self.output().tablename,
schema=self.output().schema, nlt=nlt,
shpfile_path=shapefiles.pop())
shell(cmd)
# chunk into 500 shapefiles at a time.
for i, shape_group in enumerate(grouper(shapefiles, 500)):
shell(
'export PG_USE_COPY=yes PGCLIENTENCODING=latin1; '
'echo \'{shapefiles}\' | xargs -P 16 -I shpfile_path '
'ogr2ogr -f PostgreSQL "PG:dbname=$PGDATABASE '
'active_schema={schema}" -append '
'-t_srs "EPSG:4326" {nlt} -nln {tablename} '
'shpfile_path '.format(
shapefiles='\n'.join([shp for shp in shape_group if shp]),
tablename=self.output().tablename, nlt=nlt,
schema=self.output().schema))
print 'imported {} shapefiles'.format((i + 1) * 500)
session = current_session()
# Spatial index
session.execute('ALTER TABLE {qualified_table} RENAME COLUMN '
'wkb_geometry TO geom'.format(
qualified_table=self.output().table))
session.execute('CREATE INDEX ON {qualified_table} USING GIST (geom)'.format(
qualified_table=self.output().table))
class DownloadTiger(LoadPostgresFromURL):
url_template = 'https://s3.amazonaws.com/census-backup/tiger/{year}/tiger{year}_backup.sql.gz'
year = Parameter()
def run(self):
schema = 'tiger{year}'.format(year=self.year)
shell("psql -c 'DROP SCHEMA IF EXISTS \"{schema}\" CASCADE'".format(schema=schema))
shell("psql -c 'CREATE SCHEMA \"{schema}\"'".format(schema=schema))
url = self.url_template.format(year=self.year)
self.load_from_url(url)
class SimpleShoreline(TempTableTask):
year = Parameter()
def requires(self):
return {
'data': TigerGeographyShapefileToSQL(geography='AREAWATER', year=self.year),
'us_landmask': Carto2TempTableTask(table='us_landmask_union'),
}
def run(self):
session = current_session()
session.execute('CREATE TABLE {output} AS '
'SELECT ST_Subdivide(geom) geom, false in_landmask, '
' aland, awater, mtfcc '
'FROM {input} '
"WHERE mtfcc != 'H2030' OR awater > 300000".format(
input=self.input()['data'].table,
output=self.output().table
))
session.execute('CREATE INDEX ON {output} USING GIST (geom)'.format(
output=self.output().table
))
session.execute('UPDATE {output} data SET in_landmask = True '
'FROM {landmask} landmask '
'WHERE ST_WITHIN(data.geom, landmask.the_geom)'.format(
landmask=self.input()['us_landmask'].table,
output=self.output().table
))
class SplitSumLevel(TempTableTask):
'''
Split the positive table into geoms with a reasonable number of
vertices. Assumes there is a geoid and the_geom column.
'''
year = Parameter()
geography = Parameter()
def requires(self):
return SumLevel(year=self.year, geography=self.geography)
def run(self):
session = current_session()
session.execute('CREATE TABLE {output} '
'(id serial primary key, geoid text, the_geom geometry, '
'aland NUMERIC, awater NUMERIC)'.format(
output=self.output().table))
session.execute('INSERT INTO {output} (geoid, the_geom, aland, awater) '
'SELECT geoid, ST_Subdivide(the_geom) the_geom, '
' aland, awater '
'FROM {input} '
'WHERE aland > 0 '.format(output=self.output().table,
input=self.input().table))
session.execute('CREATE INDEX ON {output} USING GIST (the_geom)'.format(
output=self.output().table))
class JoinTigerWaterGeoms(TempTableTask):
'''
Join the split up pos to the split up neg, then union the geoms based
off the split pos id (technically the union on pos geom is extraneous)
'''
year = Parameter()
geography = Parameter()
def requires(self):
return {
'pos': SplitSumLevel(year=self.year, geography=self.geography),
'neg': SimpleShoreline(year=self.year),
}
def use_mask(self):
'''
Returns true if we should not clip interior geometries, False otherwise.
'''
return self.geography.lower() in ('state', 'county', )
def run(self):
session = current_session()
stmt = ('CREATE TABLE {output} AS '
'SELECT id, geoid, ST_Union(ST_MakeValid(neg.geom)) neg_geom, '
' MAX(pos.the_geom) pos_geom '
'FROM {pos} pos, {neg} neg '
'WHERE ST_Intersects(pos.the_geom, neg.geom) '
' AND pos.awater > 0 '
' {mask_clause} '
'GROUP BY id '.format(
neg=self.input()['neg'].table,
mask_clause=' AND in_landmask = false' if self.use_mask() else '',
pos=self.input()['pos'].table,
output=self.output().table), )[0]
session.execute(stmt)
class DiffTigerWaterGeoms(TempTableTask):
'''
Calculate the difference between the pos and neg geoms
'''
year = Parameter()
geography = Parameter()
def requires(self):
return JoinTigerWaterGeoms(year=self.year, geography=self.geography)
def run(self):
session = current_session()
stmt = ('CREATE TABLE {output} '
'AS SELECT geoid, id, ST_Difference( '
'ST_MakeValid(pos_geom), ST_MakeValid(neg_geom)) the_geom '
#'pos_geom, neg_geom) the_geom '
'FROM {input}'.format(
output=self.output().table,
input=self.input().table), )[0]
session.execute(stmt)
class PreunionTigerWaterGeoms(TempTableTask):
'''
Create new table with both diffed and non-diffed (didn't intersect with
water) geoms
'''
year = Parameter()
geography = Parameter()
def requires(self):
return {
'diffed': DiffTigerWaterGeoms(year=self.year, geography=self.geography),
'split': SplitSumLevel(year=self.year, geography=self.geography)
}
def run(self):
session = current_session()
session.execute('CREATE TABLE {output} '
'AS SELECT geoid::text, id::int, the_geom::geometry, '
'aland::numeric, awater::Numeric '
'FROM {split} LIMIT 0 '.format(
output=self.output().table,
split=self.input()['split'].table))
session.execute('INSERT INTO {output} (geoid, id, the_geom) '
'SELECT geoid, id, the_geom FROM {diffed} '
'WHERE ST_Area(ST_Transform(the_geom, 3857)) > 5000'
' AND ST_NPoints(the_geom) > 10 '.format(
output=self.output().table,
diffed=self.input()['diffed'].table))
session.execute('INSERT INTO {output} '
'SELECT geoid, id, the_geom, aland, awater FROM {split} '
'WHERE id NOT IN (SELECT id from {diffed})'.format(
split=self.input()['split'].table,
diffed=self.input()['diffed'].table,
output=self.output().table))
session.execute('CREATE INDEX ON {output} (geoid) '.format(
output=self.output().table))
class UnionTigerWaterGeoms(TempTableTask):
'''
Re-union the pos table based off its geoid, this includes holes in
the output geoms
'''
year = Parameter()
geography = Parameter()
def requires(self):
return PreunionTigerWaterGeoms(year=self.year, geography=self.geography)
def run(self):
session = current_session()
session.execute('CREATE TABLE {output} AS '
'SELECT geoid, ST_Union(ST_MakeValid(the_geom)) AS the_geom, '
' MAX(aland) aland, MAX(awater) awater '
'FROM {input} '
'GROUP BY geoid'.format(
output=self.output().table,
input=self.input().table))
class ShorelineClip(TableTask):
'''
Clip the provided geography to shoreline.
'''
# MTFCC meanings:
# http://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2009/TGRSHP09AF.pdf
year = Parameter()
geography = Parameter()
def version(self):
return 7
def requires(self):
return {
'data': UnionTigerWaterGeoms(year=self.year, geography=self.geography),
'geoms': ClippedGeomColumns(),
'geoids': GeoidColumns(),
'attributes': Attributes(),
'geonames': GeonameColumns()
}
def columns(self):
return OrderedDict([
('geoid', self.input()['geoids'][self.geography + '_geoid']),
('the_geom', self.input()['geoms'][self.geography + '_clipped']),
('aland', self.input()['attributes']['aland']),
('name', self.input()['geonames'][self.geography + '_geoname']),
])
def timespan(self):
return self.year
def populate(self):
session = current_session()
stmt = ('INSERT INTO {output} '
'SELECT geoid, ST_Union(ST_MakePolygon(ST_ExteriorRing(the_geom))) AS the_geom, '
' MAX(aland) AS aland, cdb_observatory.FIRST(name) AS name '
'FROM ( '
' SELECT geoid, (ST_Dump(the_geom)).geom AS the_geom, '
' aland, name '
' FROM {input} '
") holes WHERE GeometryType(the_geom) = 'POLYGON' "
'GROUP BY geoid'.format(
output=self.output().table,
input=self.input()['data'].table), )[0]
session.execute(stmt)
class SumLevel(TableTask):
geography = Parameter()
year = Parameter()
def has_10_suffix(self):
return self.geography.lower() in ('puma', 'zcta5', 'block', )
@property
def geoid(self):
return 'geoid10' if self.has_10_suffix() else 'geoid'
@property
def aland(self):
return 'aland10' if self.has_10_suffix() else 'aland'
@property
def awater(self):
return 'awater10' if self.has_10_suffix() else 'awater'
@property
def name(self):
if self.geography in ('state', 'county', 'census_tract', 'place',
'school_district_elementary', 'cbsa', 'metdiv',
'school_district_secondary',
'school_district_unified'):
return 'name'
elif self.geography in ('congressional_district', 'block_group'):
return 'namelsad'
        elif self.geography in ('block',):
            return 'name10'
        elif self.geography in ('puma',):
            return 'namelsad10'
@property
def input_tablename(self):
return SUMLEVELS_BY_SLUG[self.geography]['table']
def version(self):
return 11
def requires(self):
tiger = DownloadTiger(year=self.year)
return {
'data': tiger,
'attributes': Attributes(),
'geoids': GeoidColumns(),
'geoms': GeomColumns(),
'sections': SectionTags(),
'subsections': SubsectionTags(),
'geonames': GeonameColumns(),
}
def columns(self):
input_ = self.input()
cols = OrderedDict([
('geoid', input_['geoids'][self.geography + '_geoid']),
('the_geom', input_['geoms'][self.geography]),
('aland', input_['attributes']['aland']),
('awater', input_['attributes']['awater']),
])
if self.name:
cols['geoname'] = input_['geonames'][self.geography + '_geoname']
return cols
def timespan(self):
return self.year
def populate(self):
session = current_session()
from_clause = '{inputschema}.{input_tablename}'.format(
inputschema='tiger' + str(self.year),
input_tablename=self.input_tablename,
)
in_colnames = [self.geoid, 'geom', self.aland, self.awater]
if self.name:
in_colnames.append(self.name)
out_colnames = self.columns().keys()
session.execute('INSERT INTO {output} ({out_colnames}) '
'SELECT {in_colnames} '
'FROM {from_clause} '.format(
output=self.output().table,
in_colnames=', '.join(in_colnames),
out_colnames=', '.join(out_colnames),
from_clause=from_clause
))
class AllSumLevels(WrapperTask):
'''
Compute all sumlevels
'''
year = Parameter()
def requires(self):
for geo in ('state', 'county', 'census_tract', 'block_group', 'place',
'puma', 'zcta5', 'school_district_elementary', 'cbsa',
'school_district_secondary', 'school_district_unified',
'block', 'congressional_district'):
yield SumLevel(year=self.year, geography=geo)
yield ShorelineClip(year=self.year, geography=geo)
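# Usage sketch: this wrapper is normally driven by the luigi scheduler. The
# module path below is an assumption about where this file lives, not taken
# from the original code.
#   luigi --module tasks.us.census.tiger AllSumLevels --year 2015 --workers 4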
class SharedTigerColumns(ColumnsTask):
def version(self):
return 2
def requires(self):
return {
'sections': SectionTags(),
'subsections': SubsectionTags(),
'source': TigerSourceTags(),
'license': LicenseTags(),
}
def columns(self):
input_ = self.input()
return OrderedDict([
('fullname', OBSColumn(
type='Text',
name='Name of the feature',
weight=3,
tags=[input_['sections']['united_states'],
input_['source']['tiger-source'],
input_['license']['no-restrictions']]
)),
('mtfcc', OBSColumn(
type='Text',
name='MAF/TIGER Feature Class Code Definitions',
description='''The MAF/TIGER Feature Class Code (MTFCC) is
a 5-digit code assigned by the Census Bureau intended to
classify and describe geographic objects or features. These
codes can be found in the TIGER/Line products. A full list of
code meanings can be found `here
<https://www.census.gov/geo/reference/mtfcc.html>`_.''',
weight=3,
tags=[input_['sections']['united_states'],
input_['source']['tiger-source'],
input_['license']['no-restrictions']]
))
])
class PointLandmarkColumns(ColumnsTask):
'''
Point landmark column definitions
'''
def version(self):
return 8
def requires(self):
return {
'sections': SectionTags(),
'subsections': SubsectionTags(),
'source': TigerSourceTags(),
'license': LicenseTags(),
}
def columns(self):
input_ = self.input()
geom = OBSColumn(
id='pointlm_geom',
type='Geometry(Point)',
weight=5,
tags=[input_['sections']['united_states'],
input_['subsections']['poi'],
input_['source']['tiger-source'],
input_['license']['no-restrictions']]
)
cols = OrderedDict([
('pointlm_id', OBSColumn(
type='Text',
weight=0,
targets={geom: GEOM_REF}
)),
('pointlm_geom', geom)
])
return cols
class PointLandmark(TableTask):
'''
Point landmark data from the census
'''
year = Parameter()
def version(self):
return 2
def requires(self):
return {
'data': TigerGeographyShapefileToSQL(year=self.year,
geography='POINTLM'),
'meta': PointLandmarkColumns(),
'shared': SharedTigerColumns()
}
def timespan(self):
return self.year
def columns(self):
shared = self.input()['shared']
cols = self.input()['meta']
return OrderedDict([
('pointid', cols['pointlm_id']),
('fullname', shared['fullname']),
('mtfcc', shared['mtfcc']),
('geom', cols['pointlm_geom']),
])
def populate(self):
session = current_session()
session.execute('''
INSERT INTO {output}
SELECT pointid, fullname, mtfcc, geom
FROM {input}'''.format(output=self.output().table,
input=self.input()['data'].table))
class PriSecRoadsColumns(ColumnsTask):
'''
Primary & secondary roads column definitions
'''
def version(self):
return 5
def requires(self):
return {
'sections': SectionTags(),
'subsections': SubsectionTags(),
'source': TigerSourceTags(),
'license': LicenseTags(),
}
def columns(self):
input_ = self.input()
geom = OBSColumn(
id='prisecroads_geom',
type='Geometry(LineString)',
weight=5,
tags=[input_['sections']['united_states'],
input_['subsections']['roads'],
input_['source']['tiger-source'],
input_['license']['no-restrictions']]
)
cols = OrderedDict([
('prisecroads_id', OBSColumn(
type='Text',
weight=0,
targets={geom: GEOM_REF}
)),
('rttyp', OBSColumn(
type='Text'
)),
('prisecroads_geom', geom)
])
return cols
class PriSecRoads(TableTask):
'''
Primary & Secondary roads from the census
'''
year = Parameter()
def requires(self):
return {
'data': TigerGeographyShapefileToSQL(year=self.year,
geography='PRISECROADS'),
'meta': PriSecRoadsColumns(),
'shared': SharedTigerColumns()
}
def version(self):
return 2
def timespan(self):
return self.year
def columns(self):
shared = self.input()['shared']
cols = self.input()['meta']
return OrderedDict([
('linearid', cols['prisecroads_id']),
('fullname', shared['fullname']),
('rttyp', cols['rttyp']),
('mtfcc', shared['mtfcc']),
('geom', cols['prisecroads_geom']),
])
def populate(self):
session = current_session()
session.execute('''
INSERT INTO {output}
SELECT linearid, fullname, rttyp, mtfcc, geom
FROM {input}'''.format(output=self.output().table,
input=self.input()['data'].table))
def load_sumlevels():
'''
Load summary levels from JSON. Returns a dict by sumlevel number.
'''
with open(os.path.join(os.path.dirname(__file__), 'summary_levels.json')) as fhandle:
sumlevels_list = json.load(fhandle)
sumlevels = {}
for slevel in sumlevels_list:
# Replace pkey ancestors with paths to columns
# We subtract 1 from the pkey because it's 1-indexed, unlike python
fields = slevel['fields']
for i, ancestor in enumerate(fields['ancestors']):
colpath = os.path.join('columns', classpath(load_sumlevels),
sumlevels_list[ancestor - 1]['fields']['slug'])
fields['ancestors'][i] = colpath
if fields['parent']:
fields['parent'] = os.path.join(
'columns', classpath(load_sumlevels),
sumlevels_list[fields['parent'] - 1]['fields']['slug'])
sumlevels[fields['summary_level']] = fields
return sumlevels
SUMLEVELS = load_sumlevels()
SUMLEVELS_BY_SLUG = dict([(v['slug'], v) for k, v in SUMLEVELS.iteritems()])
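# Usage sketch: look up the source TIGER table for a geography slug, as
# SumLevel.input_tablename does above (assumes summary_levels.json defines a
# 'county' entry).
#   county_table = SUMLEVELS_BY_SLUG['county']['table']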
| python |
from collections import OrderedDict
from algorithms.RNN import RNNModel
from algorithms.AR import AutoRegressive
from algorithms.LSTM import LSTMModel
from algorithms import LSTNet, Optim
import torch
p = 5
def get_models_optimizers(node_list, algs, cuda, lr, hidden_dim, layer_dim, nonlinearity, Data):
models, quantile_models, optimizers = OrderedDict(), OrderedDict(), OrderedDict()
quantile_optimizers, combined_optimizers = OrderedDict(), OrderedDict()
for name in node_list:
model_dict = {'rnn': [RNNModel(input_dim=1, hidden_dim=hidden_dim, layer_dim=layer_dim, quantiles=[0.5],
nonlinearity=nonlinearity),
RNNModel(input_dim=1, hidden_dim=hidden_dim, layer_dim=layer_dim, quantiles=[0.05, 0.95],
nonlinearity=nonlinearity)],
'lstm': [LSTMModel(input_dim=1, hidden_dim=hidden_dim, layer_dim=layer_dim, quantiles=[0.5]),
LSTMModel(input_dim=1, hidden_dim=hidden_dim, layer_dim=layer_dim, quantiles=[0.05, 0.95])],
'ar': [AutoRegressive(quantiles=[0.5], p=p), AutoRegressive(quantiles=[0.05, 0.95], p=p)],
'LSTNet': [LSTNet.Model(Data, method='sharq', quantiles=[0.5]),
LSTNet.Model(Data, method='sharq', quantiles=[0.05, 0.95])]}
model, quantile_model = model_dict[algs][0], model_dict[algs][1]
if cuda:
models[name], quantile_models[name] = model.cuda(), quantile_model.cuda()
else:
models[name], quantile_models[name] = model, quantile_model
optimizer_dict = {'rnn': [torch.optim.SGD(models[name].parameters(), lr=lr),
torch.optim.SGD(quantile_models[name].parameters(), lr=lr),
torch.optim.Adam(quantile_models[name].parameters(), lr=lr)],
'lstm': [torch.optim.SGD(models[name].parameters(), lr=lr),
torch.optim.SGD(quantile_models[name].parameters(), lr=lr),
torch.optim.Adam(quantile_models[name].parameters(), lr=lr)],
'ar': [torch.optim.SGD(models[name].parameters(), lr=lr),
torch.optim.SGD(quantile_models[name].parameters(), lr=lr),
torch.optim.Adam(quantile_models[name].parameters(), lr=lr)],
'LSTNet': [torch.optim.Adam(models[name].parameters(), lr=lr),
torch.optim.Adam(quantile_models[name].parameters(), lr=lr),
torch.optim.Adam(quantile_models[name].parameters(), lr=lr)]}
optimizers[name] = optimizer_dict[algs][0]
quantile_optimizers[name] = optimizer_dict[algs][1]
combined_optimizers[name] = optimizer_dict[algs][2]
return models, quantile_models, optimizers, quantile_optimizers, combined_optimizers, p
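# Usage sketch -- the node names and hyper-parameters below are illustrative
# assumptions, not values from the original code. Note that model_dict above
# instantiates every architecture eagerly, so a valid LSTNet `Data` object is
# required even when algs != 'LSTNet'.
#
#   models, q_models, opts, q_opts, comb_opts, order = get_models_optimizers(
#       node_list=['node_0', 'node_1'], algs='lstm', cuda=torch.cuda.is_available(),
#       lr=1e-3, hidden_dim=32, layer_dim=2, nonlinearity='tanh', Data=lstnet_data)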
| python |
#!/usr/bin/env python3
import logging
import sys
from .ToolChainExplorer import ToolChainExplorer
class ToolChainExplorerDFS(ToolChainExplorer):
def __init__(
self,
simgr,
max_length,
exp_dir,
nameFileShort,
worker,
):
super(ToolChainExplorerDFS, self).__init__(
simgr,
max_length,
exp_dir,
nameFileShort,
worker
)
self.log = logging.getLogger("ToolChainExplorerDFS")
self.log.setLevel("INFO")
def __take_longuest(self, simgr, source_stash):
"""
        Take the state of source_stash with the largest number of steps and append it to the active stash.
@pre : source_stash exists
"""
id_to_move = 0
max_step = 0
if len(simgr.stashes[source_stash]) > 0:
id_to_move = simgr.stashes[source_stash][0].globals["id"]
max_step = simgr.stashes[source_stash][0].globals["n_steps"]
else:
return
for s in simgr.stashes[source_stash]:
if s.globals["n_steps"] > max_step:
id_to_move = s.globals["id"]
max_step = s.globals["n_steps"]
simgr.move(source_stash, "active", lambda s: s.globals["id"] == id_to_move)
def step(self, simgr, stash="active", **kwargs):
try:
simgr = simgr.step(stash=stash, **kwargs)
except Exception as inst:
self.log.warning("ERROR IN STEP() - YOU ARE NOT SUPPOSED TO BE THERE !")
# self.log.warning(type(inst)) # the exception instance
self.log.warning(inst) # __str__ allows args to be printed directly,
exc_type, exc_obj, exc_tb = sys.exc_info()
# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.warning(exc_type, exc_obj)
exit(-1)
super().build_snapshot(simgr)
if self.print_sm_step and (
len(self.fork_stack) > 0 or len(simgr.deadended) > self.deadended
):
self.log.info(
"A new block of execution have been executed with changes in sim_manager."
)
self.log.info("Currently, simulation manager is :\n" + str(simgr))
self.log.info("pause stash len :" + str(len(self.pause_stash)))
if self.print_sm_step and len(self.fork_stack) > 0:
self.log.info("fork_stack : " + str(len(self.fork_stack)))
# if self.print_sm_step:
# self.log.info("len(self.loopBreak_stack) : " + str(len(self.loopBreak_stack)))
# self.log.info("state.globals['n_steps'] : " + str(state.globals['n_steps']))
# self.log.warning("STEP")
# We detect fork for a state
super().manage_fork(simgr)
# Remove state which performed more jump than the limit allowed
super().remove_exceeded_jump(simgr)
# Manage ended state
super().manage_deadended(simgr)
super().mv_bad_active(simgr)
# import pdb; pdb.set_trace()
# If limit of simultaneous state is not reached and we have some states available in pause stash
if len(simgr.stashes["pause"]) > 0 and len(simgr.active) < self.max_simul_state:
moves = min(
self.max_simul_state - len(simgr.active),
len(simgr.stashes["pause"]),
)
for m in range(moves):
self.__take_longuest(simgr, "pause")
super().manage_pause(simgr)
super().drop_excessed_loop(simgr)
# If states end with errors, it is often worth investigating. Set DEBUG_ERROR to live debug
# TODO : add a log file if debug error is not activated
super().manage_error(simgr)
super().manage_unconstrained(simgr)
for vis in simgr.active:
self.dict_addr_vis[
str(super().check_constraint(vis, vis.history.jump_target))
] = 1
super().excessed_step_to_active(simgr)
super().excessed_loop_to_active(simgr)
super().time_evaluation(simgr)
return simgr
| python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: signer.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0csigner.proto\x12\x07signrpc\"3\n\nKeyLocator\x12\x12\n\nkey_family\x18\x01 \x01(\x05\x12\x11\n\tkey_index\x18\x02 \x01(\x05\"L\n\rKeyDescriptor\x12\x15\n\rraw_key_bytes\x18\x01 \x01(\x0c\x12$\n\x07key_loc\x18\x02 \x01(\x0b\x32\x13.signrpc.KeyLocator\")\n\x05TxOut\x12\r\n\x05value\x18\x01 \x01(\x03\x12\x11\n\tpk_script\x18\x02 \x01(\x0c\"\xc4\x01\n\x0eSignDescriptor\x12(\n\x08key_desc\x18\x01 \x01(\x0b\x32\x16.signrpc.KeyDescriptor\x12\x14\n\x0csingle_tweak\x18\x02 \x01(\x0c\x12\x14\n\x0c\x64ouble_tweak\x18\x03 \x01(\x0c\x12\x16\n\x0ewitness_script\x18\x04 \x01(\x0c\x12\x1e\n\x06output\x18\x05 \x01(\x0b\x32\x0e.signrpc.TxOut\x12\x0f\n\x07sighash\x18\x07 \x01(\r\x12\x13\n\x0binput_index\x18\x08 \x01(\x05\"L\n\x07SignReq\x12\x14\n\x0craw_tx_bytes\x18\x01 \x01(\x0c\x12+\n\nsign_descs\x18\x02 \x03(\x0b\x32\x17.signrpc.SignDescriptor\"\x1c\n\x08SignResp\x12\x10\n\x08raw_sigs\x18\x01 \x03(\x0c\"2\n\x0bInputScript\x12\x0f\n\x07witness\x18\x01 \x03(\x0c\x12\x12\n\nsig_script\x18\x02 \x01(\x0c\">\n\x0fInputScriptResp\x12+\n\rinput_scripts\x18\x01 \x03(\x0b\x32\x14.signrpc.InputScript\"m\n\x0eSignMessageReq\x12\x0b\n\x03msg\x18\x01 \x01(\x0c\x12$\n\x07key_loc\x18\x02 \x01(\x0b\x32\x13.signrpc.KeyLocator\x12\x13\n\x0b\x64ouble_hash\x18\x03 \x01(\x08\x12\x13\n\x0b\x63ompact_sig\x18\x04 \x01(\x08\"$\n\x0fSignMessageResp\x12\x11\n\tsignature\x18\x01 \x01(\x0c\"B\n\x10VerifyMessageReq\x12\x0b\n\x03msg\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\x12\x0e\n\x06pubkey\x18\x03 \x01(\x0c\"\"\n\x11VerifyMessageResp\x12\r\n\x05valid\x18\x01 \x01(\x08\"\x80\x01\n\x10SharedKeyRequest\x12\x18\n\x10\x65phemeral_pubkey\x18\x01 \x01(\x0c\x12(\n\x07key_loc\x18\x02 \x01(\x0b\x32\x13.signrpc.KeyLocatorB\x02\x18\x01\x12(\n\x08key_desc\x18\x03 \x01(\x0b\x32\x16.signrpc.KeyDescriptor\"\'\n\x11SharedKeyResponse\x12\x12\n\nshared_key\x18\x01 \x01(\x0c\x32\xd4\x02\n\x06Signer\x12\x34\n\rSignOutputRaw\x12\x10.signrpc.SignReq\x1a\x11.signrpc.SignResp\x12@\n\x12\x43omputeInputScript\x12\x10.signrpc.SignReq\x1a\x18.signrpc.InputScriptResp\x12@\n\x0bSignMessage\x12\x17.signrpc.SignMessageReq\x1a\x18.signrpc.SignMessageResp\x12\x46\n\rVerifyMessage\x12\x19.signrpc.VerifyMessageReq\x1a\x1a.signrpc.VerifyMessageResp\x12H\n\x0f\x44\x65riveSharedKey\x12\x19.signrpc.SharedKeyRequest\x1a\x1a.signrpc.SharedKeyResponseB/Z-github.com/lightningnetwork/lnd/lnrpc/signrpcb\x06proto3')
_KEYLOCATOR = DESCRIPTOR.message_types_by_name['KeyLocator']
_KEYDESCRIPTOR = DESCRIPTOR.message_types_by_name['KeyDescriptor']
_TXOUT = DESCRIPTOR.message_types_by_name['TxOut']
_SIGNDESCRIPTOR = DESCRIPTOR.message_types_by_name['SignDescriptor']
_SIGNREQ = DESCRIPTOR.message_types_by_name['SignReq']
_SIGNRESP = DESCRIPTOR.message_types_by_name['SignResp']
_INPUTSCRIPT = DESCRIPTOR.message_types_by_name['InputScript']
_INPUTSCRIPTRESP = DESCRIPTOR.message_types_by_name['InputScriptResp']
_SIGNMESSAGEREQ = DESCRIPTOR.message_types_by_name['SignMessageReq']
_SIGNMESSAGERESP = DESCRIPTOR.message_types_by_name['SignMessageResp']
_VERIFYMESSAGEREQ = DESCRIPTOR.message_types_by_name['VerifyMessageReq']
_VERIFYMESSAGERESP = DESCRIPTOR.message_types_by_name['VerifyMessageResp']
_SHAREDKEYREQUEST = DESCRIPTOR.message_types_by_name['SharedKeyRequest']
_SHAREDKEYRESPONSE = DESCRIPTOR.message_types_by_name['SharedKeyResponse']
KeyLocator = _reflection.GeneratedProtocolMessageType('KeyLocator', (_message.Message,), {
'DESCRIPTOR' : _KEYLOCATOR,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.KeyLocator)
})
_sym_db.RegisterMessage(KeyLocator)
KeyDescriptor = _reflection.GeneratedProtocolMessageType('KeyDescriptor', (_message.Message,), {
'DESCRIPTOR' : _KEYDESCRIPTOR,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.KeyDescriptor)
})
_sym_db.RegisterMessage(KeyDescriptor)
TxOut = _reflection.GeneratedProtocolMessageType('TxOut', (_message.Message,), {
'DESCRIPTOR' : _TXOUT,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.TxOut)
})
_sym_db.RegisterMessage(TxOut)
SignDescriptor = _reflection.GeneratedProtocolMessageType('SignDescriptor', (_message.Message,), {
'DESCRIPTOR' : _SIGNDESCRIPTOR,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SignDescriptor)
})
_sym_db.RegisterMessage(SignDescriptor)
SignReq = _reflection.GeneratedProtocolMessageType('SignReq', (_message.Message,), {
'DESCRIPTOR' : _SIGNREQ,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SignReq)
})
_sym_db.RegisterMessage(SignReq)
SignResp = _reflection.GeneratedProtocolMessageType('SignResp', (_message.Message,), {
'DESCRIPTOR' : _SIGNRESP,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SignResp)
})
_sym_db.RegisterMessage(SignResp)
InputScript = _reflection.GeneratedProtocolMessageType('InputScript', (_message.Message,), {
'DESCRIPTOR' : _INPUTSCRIPT,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.InputScript)
})
_sym_db.RegisterMessage(InputScript)
InputScriptResp = _reflection.GeneratedProtocolMessageType('InputScriptResp', (_message.Message,), {
'DESCRIPTOR' : _INPUTSCRIPTRESP,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.InputScriptResp)
})
_sym_db.RegisterMessage(InputScriptResp)
SignMessageReq = _reflection.GeneratedProtocolMessageType('SignMessageReq', (_message.Message,), {
'DESCRIPTOR' : _SIGNMESSAGEREQ,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SignMessageReq)
})
_sym_db.RegisterMessage(SignMessageReq)
SignMessageResp = _reflection.GeneratedProtocolMessageType('SignMessageResp', (_message.Message,), {
'DESCRIPTOR' : _SIGNMESSAGERESP,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SignMessageResp)
})
_sym_db.RegisterMessage(SignMessageResp)
VerifyMessageReq = _reflection.GeneratedProtocolMessageType('VerifyMessageReq', (_message.Message,), {
'DESCRIPTOR' : _VERIFYMESSAGEREQ,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.VerifyMessageReq)
})
_sym_db.RegisterMessage(VerifyMessageReq)
VerifyMessageResp = _reflection.GeneratedProtocolMessageType('VerifyMessageResp', (_message.Message,), {
'DESCRIPTOR' : _VERIFYMESSAGERESP,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.VerifyMessageResp)
})
_sym_db.RegisterMessage(VerifyMessageResp)
SharedKeyRequest = _reflection.GeneratedProtocolMessageType('SharedKeyRequest', (_message.Message,), {
'DESCRIPTOR' : _SHAREDKEYREQUEST,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SharedKeyRequest)
})
_sym_db.RegisterMessage(SharedKeyRequest)
SharedKeyResponse = _reflection.GeneratedProtocolMessageType('SharedKeyResponse', (_message.Message,), {
'DESCRIPTOR' : _SHAREDKEYRESPONSE,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SharedKeyResponse)
})
_sym_db.RegisterMessage(SharedKeyResponse)
_SIGNER = DESCRIPTOR.services_by_name['Signer']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'Z-github.com/lightningnetwork/lnd/lnrpc/signrpc'
_SHAREDKEYREQUEST.fields_by_name['key_loc']._options = None
_SHAREDKEYREQUEST.fields_by_name['key_loc']._serialized_options = b'\030\001'
_KEYLOCATOR._serialized_start=25
_KEYLOCATOR._serialized_end=76
_KEYDESCRIPTOR._serialized_start=78
_KEYDESCRIPTOR._serialized_end=154
_TXOUT._serialized_start=156
_TXOUT._serialized_end=197
_SIGNDESCRIPTOR._serialized_start=200
_SIGNDESCRIPTOR._serialized_end=396
_SIGNREQ._serialized_start=398
_SIGNREQ._serialized_end=474
_SIGNRESP._serialized_start=476
_SIGNRESP._serialized_end=504
_INPUTSCRIPT._serialized_start=506
_INPUTSCRIPT._serialized_end=556
_INPUTSCRIPTRESP._serialized_start=558
_INPUTSCRIPTRESP._serialized_end=620
_SIGNMESSAGEREQ._serialized_start=622
_SIGNMESSAGEREQ._serialized_end=731
_SIGNMESSAGERESP._serialized_start=733
_SIGNMESSAGERESP._serialized_end=769
_VERIFYMESSAGEREQ._serialized_start=771
_VERIFYMESSAGEREQ._serialized_end=837
_VERIFYMESSAGERESP._serialized_start=839
_VERIFYMESSAGERESP._serialized_end=873
_SHAREDKEYREQUEST._serialized_start=876
_SHAREDKEYREQUEST._serialized_end=1004
_SHAREDKEYRESPONSE._serialized_start=1006
_SHAREDKEYRESPONSE._serialized_end=1045
_SIGNER._serialized_start=1048
_SIGNER._serialized_end=1388
# @@protoc_insertion_point(module_scope)
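# Usage sketch: once this generated module is importable as `signer_pb2`, the
# messages behave like ordinary protobuf objects (field values below are
# illustrative only).
#   loc = KeyLocator(key_family=1, key_index=0)
#   payload = loc.SerializeToString()
#   assert KeyLocator.FromString(payload) == loc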
| python |
import sym.models
import sym.trainer
import sym.datasets
import sym.config
| python |
#!/usr/bin/python
#
# Copyright (C) 2016 Google, Inc
# Written by Simon Glass <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0+
#
import os
import struct
import sys
import tempfile
import command
import tools
def fdt32_to_cpu(val):
"""Convert a device tree cell to an integer
Args:
Value to convert (4-character string representing the cell value)
Return:
A native-endian integer value
"""
if sys.version_info > (3, 0):
if isinstance(val, bytes):
val = val.decode('utf-8')
val = val.encode('raw_unicode_escape')
return struct.unpack('>I', val)[0]
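# Example (illustrative input): a four-byte big-endian cell decodes to its
# integer value, e.g. fdt32_to_cpu(b'\x00\x00\x00\x2a') == 42.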
def EnsureCompiled(fname):
"""Compile an fdt .dts source file into a .dtb binary blob if needed.
Args:
        fname: Filename (if .dts it will be compiled). If not, it will be
            left alone.
Returns:
Filename of resulting .dtb file
"""
_, ext = os.path.splitext(fname)
if ext != '.dts':
return fname
dts_input = tools.GetOutputFilename('source.dts')
dtb_output = tools.GetOutputFilename('source.dtb')
search_paths = [os.path.join(os.getcwd(), 'include')]
root, _ = os.path.splitext(fname)
args = ['-E', '-P', '-x', 'assembler-with-cpp', '-D__ASSEMBLY__']
args += ['-Ulinux']
for path in search_paths:
args.extend(['-I', path])
args += ['-o', dts_input, fname]
command.Run('cc', *args)
# If we don't have a directory, put it in the tools tempdir
search_list = []
for path in search_paths:
search_list.extend(['-i', path])
args = ['-I', 'dts', '-o', dtb_output, '-O', 'dtb']
args.extend(search_list)
args.append(dts_input)
command.Run('dtc', *args)
return dtb_output
def GetInt(node, propname, default=None):
prop = node.props.get(propname)
if not prop:
return default
value = fdt32_to_cpu(prop.value)
    if isinstance(value, list):
        raise ValueError("Node '%s' property '%s' has list value: expecting "
                         "a single integer" % (node.name, propname))
return value
def GetString(node, propname, default=None):
prop = node.props.get(propname)
if not prop:
return default
value = prop.value
    if isinstance(value, list):
        raise ValueError("Node '%s' property '%s' has list value: expecting "
                         "a single string" % (node.name, propname))
return value
def GetBool(node, propname, default=False):
if propname in node.props:
return True
return default
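# Usage sketch: `node` below is a hypothetical fdt node object (this module
# only provides the helpers; the parser that yields nodes lives elsewhere).
#   reg = GetInt(node, 'reg', default=0)
#   compat = GetString(node, 'compatible', default='unknown')
#   enabled = GetBool(node, 'u-boot,dm-pre-reloc')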
| python |
import inspect
import typing
try:
from contextlib import (
AsyncExitStack,
asynccontextmanager,
AbstractAsyncContextManager,
)
except ImportError: # pragma: no cover
AbstractAsyncContextManager = None # type: ignore
from async_generator import asynccontextmanager # type: ignore
from async_exit_stack import AsyncExitStack # type: ignore
def is_async_context_manager(obj: typing.Any) -> bool:
if AbstractAsyncContextManager is None: # pragma: no cover
return (
not inspect.isclass(obj)
and hasattr(obj, "__aenter__")
and hasattr(obj, "__aexit__")
)
return isinstance(obj, AbstractAsyncContextManager)
class asyncnullcontext:
async def __aenter__(self) -> None:
pass
async def __aexit__(self, *args: typing.Any) -> None:
pass
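# Usage sketch: fall back to asyncnullcontext when a dependency is not an
# async context manager (`maybe_cm` is a hypothetical object, not part of
# this module).
#
#   async def enter(maybe_cm: typing.Any) -> None:
#       async with AsyncExitStack() as stack:
#           cm = maybe_cm if is_async_context_manager(maybe_cm) else asyncnullcontext()
#           await stack.enter_async_context(cm)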
| python |
from oeis import phi
def test_phi():
    assert [phi(x) for x in range(1, 10)] == [1, 1, 2, 2, 4, 2, 6, 4, 6]
| python |
from datetime import datetime
import json
import glob
import os
from pathlib import Path
from multiprocessing.pool import ThreadPool
from typing import Dict
import numpy as np
import pandas as pd
from scipy.stats.mstats import gmean
import torch
from torch import nn
from torch.utils.data import DataLoader
ON_KAGGLE: bool = 'KAGGLE_WORKING_DIR' in os.environ
def gmean_df(df: pd.DataFrame) -> pd.DataFrame:
return df.groupby(level=0).agg(lambda x: gmean(list(x)))
def mean_df(df: pd.DataFrame) -> pd.DataFrame:
return df.groupby(level=0).mean()
def load_model(model: nn.Module, path: Path) -> Dict:
state = torch.load(str(path))
model.load_state_dict(state['model'])
print('Loaded model from epoch {epoch}, step {step:,}'.format(**state))
return state
class ThreadingDataLoader(DataLoader):
def __iter__(self):
sample_iter = iter(self.batch_sampler)
if self.num_workers == 0:
for indices in sample_iter:
yield self.collate_fn([self._get_item(i) for i in indices])
else:
prefetch = 1
with ThreadPool(processes=self.num_workers) as pool:
futures = []
for indices in sample_iter:
futures.append([pool.apply_async(self._get_item, args=(i,))
for i in indices])
if len(futures) > prefetch:
yield self.collate_fn([f.get() for f in futures.pop(0)])
# items = pool.map(lambda i: self.dataset[i], indices)
# yield self.collate_fn(items)
for batch_futures in futures:
yield self.collate_fn([f.get() for f in batch_futures])
def _get_item(self, i):
return self.dataset[i]
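# Usage sketch: ThreadingDataLoader is a drop-in replacement for DataLoader
# that prefetches batches with a thread pool (the dataset and sizes below are
# illustrative assumptions).
#   loader = ThreadingDataLoader(dataset, batch_size=32, num_workers=4, shuffle=True)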
def write_event(log, step: int, **data):
data['step'] = step
data['dt'] = datetime.now().isoformat()
log.write(json.dumps(data, sort_keys=True))
log.write('\n')
log.flush()
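# Usage sketch (hypothetical log path): append one JSON line per training step.
#   with open('train.log', 'at') as log:
#       write_event(log, step=100, loss=0.42)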
def _smooth(ys, indices):
return [np.mean(ys[idx: indices[i + 1]])
for i, idx in enumerate(indices[:-1])]
import random
import math
from PIL import Image
import torchvision.transforms as transforms
from torchvision.transforms import (
ToTensor, Normalize, Compose, Resize, CenterCrop, RandomCrop,
RandomHorizontalFlip)
class RandomSizedCrop:
    def __init__(self, size, interpolation=Image.BILINEAR, min_aspect=4/5,
                 max_aspect=5/4, min_area=0.25, max_area=1):
self.size = size
self.interpolation = interpolation
self.min_aspect = min_aspect
self.max_aspect = max_aspect
self.min_area = min_area
self.max_area = max_area
def __call__(self, img):
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(self.min_area, self.max_area) * area
aspect_ratio = random.uniform(self.min_aspect, self.max_aspect)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
assert(img.size == (w, h))
return img.resize((self.size, self.size), self.interpolation)
scale = Resize(self.size, interpolation=self.interpolation)
crop = CenterCrop(self.size)
return crop(scale(img))
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.Resize(320),
transforms.ColorJitter(),
RandomSizedCrop(224),
])
test_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.Resize(320),
RandomCrop(224),
])
tensor_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
from pathlib import Path
from typing import Callable, List
import cv2
import pandas as pd
from PIL import Image
import torch
from torch.utils.data import Dataset
N_CLASSES = 1103
DATA_ROOT = Path('../input/imet-2019-fgvc6' if ON_KAGGLE else '/nfsshare/home/white-hearted-orange/data')
class TrainDataset(Dataset):
    def __init__(self, root: Path, df: pd.DataFrame, count: pd.DataFrame,
                 thres, image_transform: Callable, debug: bool = True):
super().__init__()
self._root = root
self._df = df
self._image_transform = image_transform
self._debug = debug
self.index = np.where(count['count'] < thres)
def __len__(self):
return len(self._df)
def __getitem__(self, idx: int):
item = self._df.iloc[idx]
image = load_transform_image(
item, self._root, self._image_transform, debug=self._debug)
target = torch.zeros(N_CLASSES)
for cls in item.attribute_ids.split():
target[int(cls)] = 1
target[self.index] = 0
return image, target
class TTADataset:
def __init__(self, root: Path, df: pd.DataFrame,
image_transform: Callable, tta: int):
self._root = root
self._df = df
self._image_transform = image_transform
self._tta = tta
def __len__(self):
return len(self._df) * self._tta
def __getitem__(self, idx):
item = self._df.iloc[idx % len(self._df)]
image = load_transform_image(item, self._root, self._image_transform)
return image, item.id
def load_transform_image(
item, root: Path, image_transform: Callable, debug: bool = False):
image = load_image(item, root)
image = image_transform(image)
if debug:
image.save('_debug.png')
return tensor_transform(image)
def load_image(item, root: Path) -> Image.Image:
image = cv2.imread(str(root / f'{item.id}.png'))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return Image.fromarray(image)
def get_ids(root: Path) -> List[str]:
return sorted({p.name.split('_')[0] for p in root.glob('*.png')})
import argparse
from collections import defaultdict, Counter
import random
import pandas as pd
import tqdm
def make_folds(n_folds: int) -> pd.DataFrame:
df = pd.read_csv(DATA_ROOT / 'train.csv')
cls_counts = Counter(cls for classes in df['attribute_ids'].str.split()
for cls in classes)
fold_cls_counts = defaultdict(int)
folds = [-1] * len(df)
for item in tqdm.tqdm(df.sample(frac=1, random_state=42).itertuples(),
total=len(df)):
cls = min(item.attribute_ids.split(), key=lambda cls: cls_counts[cls])
fold_counts = [(f, fold_cls_counts[f, cls]) for f in range(n_folds)]
min_count = min([count for _, count in fold_counts])
random.seed(item.Index)
fold = random.choice([f for f, count in fold_counts
if count == min_count])
folds[item.Index] = fold
for cls in item.attribute_ids.split():
fold_cls_counts[fold, cls] += 1
df['fold'] = folds
return df
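# Usage sketch: build the folds and hold out fold 0 for validation (the fold
# count is an assumption, not a value from this code).
#   folds = make_folds(n_folds=5)
#   train_fold, valid_fold = folds[folds['fold'] != 0], folds[folds['fold'] == 0]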
####################################model#################################
"""
ResNet code gently borrowed from
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
from collections import OrderedDict
import math
import torch.nn as nn
from torch.utils import model_zoo
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class Bottleneck(nn.Module):
"""
Base class for bottlenecks that implements `forward()` method.
"""
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = self.se_module(out) + residual
out = self.relu(out)
return out
class SEBottleneck(Bottleneck):
"""
Bottleneck for SENet154.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes * 2)
self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3,
stride=stride, padding=1, groups=groups,
bias=False)
self.bn2 = nn.BatchNorm2d(planes * 4)
self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNetBottleneck(Bottleneck):
"""
ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
implementation and uses `stride=stride` in `conv1` and not in `conv2`
(the latter is used in the torchvision implementation of ResNet).
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEResNetBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False,
stride=stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1,
groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNeXtBottleneck(Bottleneck):
"""
ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None, base_width=4):
super(SEResNeXtBottleneck, self).__init__()
width = math.floor(planes * (base_width / 64)) * groups
self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False,
stride=1)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SENet(nn.Module):
def __init__(self, block, layers, groups, reduction, dropout_p=0.2,
inplanes=128, input_3x3=True, downsample_kernel_size=3,
downsample_padding=1, num_classes=1000):
"""
Parameters
----------
block (nn.Module): Bottleneck class.
- For SENet154: SEBottleneck
- For SE-ResNet models: SEResNetBottleneck
- For SE-ResNeXt models: SEResNeXtBottleneck
layers (list of ints): Number of residual blocks for 4 layers of the
network (layer1...layer4).
groups (int): Number of groups for the 3x3 convolution in each
bottleneck block.
- For SENet154: 64
- For SE-ResNet models: 1
- For SE-ResNeXt models: 32
reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
- For all models: 16
dropout_p (float or None): Drop probability for the Dropout layer.
If `None` the Dropout layer is not used.
- For SENet154: 0.2
- For SE-ResNet models: None
- For SE-ResNeXt models: None
inplanes (int): Number of input channels for layer1.
- For SENet154: 128
- For SE-ResNet models: 64
- For SE-ResNeXt models: 64
input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
a single 7x7 convolution in layer0.
- For SENet154: True
- For SE-ResNet models: False
- For SE-ResNeXt models: False
downsample_kernel_size (int): Kernel size for downsampling convolutions
in layer2, layer3 and layer4.
- For SENet154: 3
- For SE-ResNet models: 1
- For SE-ResNeXt models: 1
downsample_padding (int): Padding for downsampling convolutions in
layer2, layer3 and layer4.
- For SENet154: 1
- For SE-ResNet models: 0
- For SE-ResNeXt models: 0
num_classes (int): Number of outputs in `last_linear` layer.
- For all models: 1000
"""
super(SENet, self).__init__()
self.inplanes = inplanes
if input_3x3:
layer0_modules = [
('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1,
bias=False)),
('bn1', nn.BatchNorm2d(64)),
('relu1', nn.ReLU(inplace=True)),
('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,
bias=False)),
('bn2', nn.BatchNorm2d(64)),
('relu2', nn.ReLU(inplace=True)),
('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,
bias=False)),
('bn3', nn.BatchNorm2d(inplanes)),
('relu3', nn.ReLU(inplace=True)),
]
else:
layer0_modules = [
('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2,
padding=3, bias=False)),
('bn1', nn.BatchNorm2d(inplanes)),
('relu1', nn.ReLU(inplace=True)),
]
# To preserve compatibility with Caffe weights `ceil_mode=True`
# is used instead of `padding=1`.
layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2,
ceil_mode=True)))
self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
self.layer1 = self._make_layer(
block,
planes=64,
blocks=layers[0],
groups=groups,
reduction=reduction,
downsample_kernel_size=1,
downsample_padding=0
)
self.layer2 = self._make_layer(
block,
planes=128,
blocks=layers[1],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.layer3 = self._make_layer(
block,
planes=256,
blocks=layers[2],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.layer4 = self._make_layer(
block,
planes=512,
blocks=layers[3],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.avg_pool = nn.AvgPool2d(7, stride=1)
self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None
self.last_linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,
downsample_kernel_size=1, downsample_padding=0):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=downsample_kernel_size, stride=stride,
padding=downsample_padding, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, groups, reduction, stride,
downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, groups, reduction))
return nn.Sequential(*layers)
def features(self, x):
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def logits(self, x):
x = self.avg_pool(x)
if self.dropout is not None:
x = self.dropout(x)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, x):
x = self.features(x)
x = self.logits(x)
return x
def initialize_pretrained_model(model, num_classes, settings):
assert num_classes == settings['num_classes'], \
'num_classes should be {}, but is {}'.format(
settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
def se_resnext50_32x4d(num_classes=1000, pretrained='imagenet'):
model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16,
dropout_p=None, inplanes=64, input_3x3=False,
downsample_kernel_size=1, downsample_padding=0,
num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['se_resnext50_32x4d'][pretrained]
initialize_pretrained_model(model, num_classes, settings)
return model
def se_resnext101_32x4d(num_classes=1000, pretrained=None):
model = SENet(SEResNeXtBottleneck, [3, 4, 23, 3], groups=32, reduction=16,
dropout_p=None, inplanes=64, input_3x3=False,
downsample_kernel_size=1, downsample_padding=0,
num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['se_resnext101_32x4d'][pretrained]
initialize_pretrained_model(model, num_classes, settings)
return model
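# --- Hedged usage sketch (illustration only, not part of the original kernel) ---
# Building the backbone with pretrained=None avoids any weight download; a
# 224x224 input matches the fixed AvgPool2d(7) head. Kept commented out so the
# training script below is not slowed down by an extra model build:
#   _model = se_resnext50_32x4d(num_classes=1000, pretrained=None)
#   _model(torch.randn(1, 3, 224, 224)).shape   # -> torch.Size([1, 1000])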
########################main.py########################################################
import argparse
from itertools import islice
import json
from pathlib import Path
import shutil
import warnings
from typing import Dict
import numpy as np
import pandas as pd
from sklearn.metrics import fbeta_score
from sklearn.exceptions import UndefinedMetricWarning
import torch
from torch import nn, cuda
from torch.optim import Adam, SGD, lr_scheduler
import tqdm
def predict(model, root: Path, df: pd.DataFrame, out_path: Path,
batch_size: int, tta: int, workers: int, use_cuda: bool):
loader = DataLoader(
dataset=TTADataset(root, df, test_transform, tta=tta),
shuffle=False,
batch_size=batch_size,
num_workers=workers,
)
model.eval()
all_outputs, all_ids = [], []
with torch.no_grad():
for inputs, ids in tqdm.tqdm(loader, desc='Predict'):
if use_cuda:
inputs = inputs.cuda()
outputs = torch.sigmoid(model(inputs))
all_outputs.append(outputs.data.cpu().numpy())
all_ids.extend(ids)
df = pd.DataFrame(
data=np.concatenate(all_outputs),
index=all_ids,
columns=map(str, range(N_CLASSES)))
df = mean_df(df)
df.to_hdf(out_path, 'prob', index_label='id')
print(f'Saved predictions to {out_path}')
def train(args, model: nn.Module, criterion, *, params,folds, count,
init_optimizer, use_cuda,
n_epochs=None, patience=2, max_lr_changes=2) -> bool:
lr = args.lr
n_epochs = n_epochs or args.n_epochs
params = list(params)
optimizer = init_optimizer(params, lr)
run_root = Path(args.run_root)
model_path = run_root / 'model.pt'
best_model_path = run_root / 'best-model.pt'
if best_model_path.exists():
state = load_model(model, best_model_path)
epoch = state['epoch']
step = state['step']
best_valid_loss = state['best_valid_loss']
else:
epoch = 1
step = 0
best_valid_loss = float('inf')
lr_changes = 0
save = lambda ep: torch.save({
'model': model.state_dict(),
'epoch': ep,
'step': step,
'best_valid_loss': best_valid_loss
}, str(model_path))
report_each = 10
log = run_root.joinpath('train.log').open('at', encoding='utf8')
valid_losses = []
lr_reset_epoch = epoch
### doing cv
train_fold = folds[folds['fold'] != 0]
valid_fold = folds[folds['fold'] == 0]
def make_loader(df: pd.DataFrame, image_transform,count,thres) -> DataLoader:
return DataLoader(
TrainDataset(train_root, df, count,thres,image_transform, debug=args.debug),
shuffle=True,
batch_size=args.batch_size,
num_workers=args.workers,
)
if args.limit:
train_loader = make_loader(train_fold[:args.limit], train_transform,count,args.count)
valid_loader = make_loader(valid_fold[:args.limit], test_transform,count,0)
else:
train_loader = make_loader(train_fold, train_transform,count,args.count)
valid_loader = make_loader(valid_fold, test_transform,count,0)
##############
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max = 20)
for epoch in range(epoch, n_epochs + 1):
scheduler.step()
model.train()
losses = []
tq = tqdm.tqdm(total=(len(train_loader) * args.batch_size))
tq.set_description(f'Epoch {epoch}, lr {lr}')
tl = train_loader
try:
mean_loss = 0
for i, (inputs, targets) in enumerate(tl):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
# C is the number of classes.
batch_size = inputs.size(0)
#smoothed_labels =0.9*targets + 0.1*(torch.ones((batch_size,N_CLASSES)).cuda()-targets)
#smoothed_labels = smoothed_labels.cuda()
outputs = model(inputs)
loss = _reduce_loss(criterion(outputs, targets))
(batch_size * loss).backward()
if (i + 1) % args.step == 0:
optimizer.step()
optimizer.zero_grad()
step += 1
tq.update(batch_size)
losses.append(loss.item())
mean_loss = np.mean(losses[-report_each:])
tq.set_postfix(loss=f'{mean_loss:.3f}')
if i and i % report_each == 0:
write_event(log, step, loss=mean_loss)
write_event(log, step, loss=mean_loss)
tq.close()
save(epoch + 1)
valid_metrics = validation(model, criterion, valid_loader, use_cuda)
write_event(log, step, **valid_metrics)
valid_loss = valid_metrics['valid_loss']
valid_losses.append(valid_loss)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
shutil.copy(str(model_path), str(best_model_path))
elif (patience and epoch - lr_reset_epoch > patience and
min(valid_losses[-patience:]) > best_valid_loss):
# "patience" epochs without improvement
lr_changes +=1
if lr_changes > max_lr_changes:
break
lr *= 0.8
print(f'lr updated to {lr}')
lr_reset_epoch = epoch
optimizer = init_optimizer(params, lr)
except KeyboardInterrupt:
tq.close()
print('Ctrl+C, saving snapshot')
save(epoch)
print('done.')
return False
return True
def validation(
model: nn.Module, criterion, valid_loader, use_cuda,
) -> Dict[str, float]:
model.eval()
all_losses, all_predictions, all_targets = [], [], []
with torch.no_grad():
for inputs, targets in valid_loader:
all_targets.append(targets.numpy().copy())
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
loss = criterion(outputs, targets)
all_losses.append(_reduce_loss(loss).item())
predictions = torch.sigmoid(outputs)
all_predictions.append(predictions.cpu().numpy())
all_predictions = np.concatenate(all_predictions)
all_targets = np.concatenate(all_targets)
def get_score(y_pred):
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UndefinedMetricWarning)
return fbeta_score(
all_targets, y_pred, beta=2, average='samples')
metrics = {}
argsorted = all_predictions.argsort(axis=1)
for threshold in [0.05,0.10, 0.15, 0.20]:
metrics[f'valid_f2_th_{threshold:.2f}'] = get_score(
binarize_prediction(all_predictions, threshold, argsorted))
metrics['valid_loss'] = np.mean(all_losses)
print(' | '.join(f'{k} {v:.3f}' for k, v in sorted(
metrics.items(), key=lambda kv: -kv[1])))
return metrics
def binarize_prediction(probabilities, threshold: float, argsorted=None,
min_labels=1, max_labels=10):
""" Return matrix of 0/1 predictions, same shape as probabilities.
"""
assert probabilities.shape[1] == N_CLASSES
if argsorted is None:
argsorted = probabilities.argsort(axis=1)
max_mask = _make_mask(argsorted, max_labels)
min_mask = _make_mask(argsorted, min_labels)
prob_mask = probabilities > threshold
return (max_mask & prob_mask) | min_mask
def _make_mask(argsorted, top_n: int):
mask = np.zeros_like(argsorted, dtype=np.uint8)
col_indices = argsorted[:, -top_n:].reshape(-1)
row_indices = [i // top_n for i in range(len(col_indices))]
mask[row_indices, col_indices] = 1
return mask
def _reduce_loss(loss):
return loss.sum() / loss.shape[0]
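# Hedged sanity check (illustration only, not from the original script):
# _make_mask flags the top_n highest-probability columns per row. For a single
# row [0.1, 0.7, 0.2] and top_n=2, columns 1 and 2 should be set.
_demo_argsorted = np.array([[0.1, 0.7, 0.2]]).argsort(axis=1)
assert (_make_mask(_demo_argsorted, top_n=2) == np.array([[0, 1, 1]])).all()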
class arg():
def __init__(self):
self.run_root = 'model2'
self.batch_size = 64
self.step = 1
self.workers = 2
self.lr = 0.0001
self.patience = 2
self.clean = 0
self.n_epochs = 25
self.tta = 4
self.debug = False  # argparse's --debug flag default (store_true -> False when the flag is absent)
self.pretrained = 0
self.threshold = 0.1
self.folds = 5
self.limit = 0
self.count = 0
args = arg()
run_root = Path(args.run_root)
folds = make_folds(n_folds = args.folds)
train_root = DATA_ROOT / 'train'
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=2, logits = True, reduction=False):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.logits = logits
self.reduce = reduction
def forward(self, inputs, targets):
#print(inputs.size(),targets.size())
if self.logits:
BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets,reduction='none')
else:
BCE_loss = F.binary_cross_entropy(inputs, targets, reduction = 'none')
pt = torch.exp(-BCE_loss)
F_loss = self.alpha * (1-pt)**self.gamma * BCE_loss
if self.reduce:
return torch.mean(F_loss)
else:
return F_loss
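# Hedged mini-example (illustration only): with logits=True and reduction=False,
# FocalLoss returns an elementwise loss; the (1 - pt)**gamma factor shrinks the
# contribution of the already well-classified first logit.
_focal_demo = FocalLoss()(torch.tensor([[2.0, -1.0]]), torch.tensor([[1.0, 0.0]]))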
Sim = torch.load(DATA_ROOT/'Sim.pt')
Sim = Sim*torch.FloatTensor((Sim>0.5).numpy())
Sim = Sim.cuda()
class SimilarityLoss1(nn.Module):
def __init__(self, sim):
'''
sim : N_class*N_class
'''
super(SimilarityLoss1, self).__init__()
self.sim = sim
def forward(self,input,target):
Smatrix = torch.matmul(target, self.sim) + 1
#print(Smatrix)
P = torch.exp(input)
loss = -(Smatrix*target*(input-torch.log(P+1))+(1-target)*(-torch.log(1+P)))
return loss
class FocalSimilarityLoss1(nn.Module):
def __init__(self, sim, gamma=2):
'''
sim : N_class*N_class
'''
super(FocalSimilarityLoss1, self).__init__()
self.sim = sim
self.gamma = gamma
def forward(self,input,target):
Smatrix = torch.matmul(target, self.sim) + 1
P = torch.exp(input)
loss = -(Smatrix*target*(input-torch.log(P+1))+(1-target)*(-torch.log(1+P)))
pt = torch.exp(-loss)
F_loss = (1-pt)**self.gamma * loss
return F_loss
#criterion = FocalSimilarityLoss1(sim = Sim)
criterion = SimilarityLoss1(sim = Sim)
#criterion = FocalLoss()
class AvgPool(nn.Module):
def forward(self, x):
return F.avg_pool2d(x, x.shape[2:])
class Net(nn.Module):
def __init__(self, num_classes, dropout=True):
super().__init__()
self.net = se_resnext101_32x4d()
self.net.load_state_dict(torch.load(DATA_ROOT/'se_resnext101_32x4d-3b2fe3d8.pth'))
self.net.avg_pool = nn.AdaptiveAvgPool2d(1)
#self.net = nn.Sequential(*list(model0.children())[0])
# print(self.net.output)
if dropout:
# model.add_module('fc', torch.nn.Linear(4096, out_num))
self.net.last_linear = nn.Sequential(
nn.Dropout(),
nn.Linear(self.net.last_linear.in_features, num_classes)
)
else:
self.net.last_linear = nn.Linear(self.net.last_linear.in_features, num_classes)
#self.finetune()
def forward(self, x):
return self.net(x)
def finetune(self):
for para in list(self.net.parameters())[:-2]:
para.requires_grad=False
model = Net(N_CLASSES)
use_cuda = cuda.is_available()
print(use_cuda)
#fresh_params = list(model.fresh_params())
all_params = list(model.parameters())
if use_cuda:
model = model.cuda()
if run_root.exists() and args.clean:
shutil.rmtree(run_root)
run_root.mkdir(exist_ok=True, parents=True)
(run_root / 'params.json').write_text(
json.dumps(vars(args), indent=4, sort_keys=True))
from collections import Counter
def get_count():
df = pd.read_csv('../input/imet-2019-fgvc6/train.csv' if ON_KAGGLE else '/nfsshare/home/white-hearted-orange/data/train.csv')
cls_counts = Counter(cls for classes in df['attribute_ids'].str.split() for cls in classes)
stat = cls_counts.most_common()
stat1 = pd.DataFrame(stat)
stat1.columns=('attribute_id','count')
stat1['attribute_id'] = stat1['attribute_id'].astype('int')
return stat1
count = get_count()
train_kwargs = dict(
args= args,
model = model,
folds = folds,
count = count,
criterion=criterion,
patience=args.patience,
init_optimizer=lambda params, lr: Adam(params, lr),
use_cuda=use_cuda,
)
train(params=all_params, **train_kwargs)
load_model(model, run_root / 'best-model.pt')
predict_kwargs = dict(
batch_size=args.batch_size,
tta=args.tta,
use_cuda=use_cuda,
workers=args.workers,
)
test_root = DATA_ROOT / ('test')
ss = pd.read_csv(DATA_ROOT / 'sample_submission.csv')
predict(model, df=ss, root=test_root,
out_path=run_root / 'test.h5',
**predict_kwargs)
def get_classes(item):
return ' '.join(cls for cls, is_present in item.items() if is_present)
sample_submission = pd.read_csv(
DATA_ROOT / 'sample_submission.csv', index_col='id')
df = pd.read_hdf(run_root / 'test.h5', index_col='id')
df = df.reindex(sample_submission.index)
df = mean_df(df)
df[:] = binarize_prediction(df.values, threshold=args.threshold)
df = df.apply(get_classes, axis=1)
df.name = 'attribute_ids'
df.to_csv('submission.csv', header=True)
| python |
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from platformio import fs
from platformio.package.exception import PackageException
from platformio.package.meta import PackageItem, PackageSpec
class PackageManagerSymlinkMixin(object):
@staticmethod
def is_symlink(path):
return path and path.endswith(".pio-link") and os.path.isfile(path)
@classmethod
def resolve_symlink(cls, path):
assert cls.is_symlink(path)
data = fs.load_json(path)
spec = PackageSpec(**data["spec"])
assert spec.symlink
pkg_dir = spec.uri[10:]
if not os.path.isabs(pkg_dir):
pkg_dir = os.path.normpath(os.path.join(data["cwd"], pkg_dir))
return (pkg_dir if os.path.isdir(pkg_dir) else None, spec)
def get_symlinked_package(self, path):
pkg_dir, spec = self.resolve_symlink(path)
if not pkg_dir:
return None
pkg = PackageItem(os.path.realpath(pkg_dir))
if not pkg.metadata:
pkg.metadata = self.build_metadata(pkg.path, spec)
return pkg
def install_symlink(self, spec):
assert spec.symlink
pkg_dir = spec.uri[10:]
if not os.path.isdir(pkg_dir):
raise PackageException(
f"Can not create a symbolic link for `{pkg_dir}`, not a directory"
)
link_path = os.path.join(
self.package_dir,
"%s.pio-link" % (spec.name or os.path.basename(os.path.abspath(pkg_dir))),
)
with open(link_path, mode="w", encoding="utf-8") as fp:
json.dump(dict(cwd=os.getcwd(), spec=spec.as_dict()), fp)
return self.get_symlinked_package(link_path)
def uninstall_symlink(self, spec):
assert spec.symlink
for name in os.listdir(self.package_dir):
path = os.path.join(self.package_dir, name)
if not self.is_symlink(path):
continue
pkg = self.get_symlinked_package(path)
if pkg and pkg.metadata.spec.uri == spec.uri:
os.remove(path)
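# Hedged illustration (assumed layout, inferred from install_symlink() above):
# a "<name>.pio-link" file is plain JSON recording the caller's cwd plus the
# package spec, e.g.
#   {"cwd": "/home/user/project",
#    "spec": {"name": "foo", "uri": "symlink:///home/user/libs/foo", ...}}
# resolve_symlink() then strips the 10-character "symlink://" prefix from the
# uri to recover the linked directory.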
| python |
from ebonite.core.objects.requirements import InstallableRequirement, Requirements, resolve_requirements
def test_resolve_requirements_arg():
requirements = Requirements([InstallableRequirement('dumb', '0.4.1'), InstallableRequirement('art', '4.0')])
actual_reqs = resolve_requirements(requirements)
assert actual_reqs == requirements
def test_resolve_requirement_arg():
req = InstallableRequirement('dumb', '0.4.1')
actual_reqs = resolve_requirements(req)
assert actual_reqs.installable[0] == req
def test_resolve_requirement_list_arg():
req = [InstallableRequirement('dumb', '0.4.1'), InstallableRequirement('art', '4.0')]
actual_reqs = resolve_requirements(req)
assert len(actual_reqs.installable) == 2
assert actual_reqs.installable == req
def test_resolve_str_arg():
req = "dumb==0.4.1"
actual_reqs = resolve_requirements(req)
assert actual_reqs.installable[0].to_str() == req
def test_resolve_str_list_arg():
req = ["dumb==0.4.1", "art==4.0"]
actual_reqs = resolve_requirements(req)
assert len(actual_reqs.installable) == 2
assert req == [r.to_str() for r in actual_reqs.installable]
| python |
"""
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
| python |
# -*- coding: utf-8 -*-
import pytest
from unittest import mock
from pytube import YouTube
from pytube.exceptions import LiveStreamError
from pytube.exceptions import RecordingUnavailable
from pytube.exceptions import RegexMatchError
from pytube.exceptions import VideoUnavailable
from pytube.exceptions import VideoPrivate
def test_video_unavailable():
try:
raise VideoUnavailable(video_id="YLnZklYFe7E")
except VideoUnavailable as e:
assert e.video_id == "YLnZklYFe7E" # noqa: PT017
assert str(e) == "YLnZklYFe7E is unavailable"
def test_regex_match_error():
try:
raise RegexMatchError(caller="hello", pattern="*")
except RegexMatchError as e:
assert str(e) == "hello: could not find match for *"
def test_live_stream_error():
try:
raise LiveStreamError(video_id="YLnZklYFe7E")
except LiveStreamError as e:
assert e.video_id == "YLnZklYFe7E" # noqa: PT017
assert str(e) == "YLnZklYFe7E is streaming live and cannot be loaded"
def test_recording_unavailable():
try:
raise RecordingUnavailable(video_id="5YceQ8YqYMc")
except RecordingUnavailable as e:
assert e.video_id == "5YceQ8YqYMc" # noqa: PT017
assert str(e) == "5YceQ8YqYMc does not have a live stream recording available"
def test_private_error():
try:
raise VideoPrivate("mRe-514tGMg")
except VideoPrivate as e:
assert e.video_id == "mRe-514tGMg" # noqa: PT017
assert str(e) == "mRe-514tGMg is a private video"
def test_raises_video_private(private):
with mock.patch("pytube.request.urlopen") as mock_url_open:
# Mock the responses to YouTube
mock_url_open_object = mock.Mock()
mock_url_open_object.read.side_effect = [
private["watch_html"].encode("utf-8"),
]
mock_url_open.return_value = mock_url_open_object
with pytest.raises(VideoPrivate):
YouTube("https://youtube.com/watch?v=mRe-514tGMg")
def test_raises_recording_unavailable(missing_recording):
with mock.patch("pytube.request.urlopen") as mock_url_open:
# Mock the responses to YouTube
mock_url_open_object = mock.Mock()
mock_url_open_object.read.side_effect = [
missing_recording["watch_html"].encode("utf-8"),
]
mock_url_open.return_value = mock_url_open_object
with pytest.raises(RecordingUnavailable):
YouTube("https://youtube.com/watch?v=5YceQ8YqYMc")
| python |
import sys
import numpy as np
raw = sys.stdin.read()
locs = np.fromstring(raw, dtype=np.int64, sep=',')
average = np.average(locs)
def forLocation(locs, dest):
absolute = np.abs(locs - dest)
return ((absolute + 1) * absolute // 2).sum()
print('Result:', min(
forLocation(locs, int(np.ceil(average))),
forLocation(locs, int(np.floor(average)))
))
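# Hedged note (worked example, not in the original): the fuel cost of moving a
# crab a distance d is triangular, d*(d+1)//2, so forLocation(np.array([1, 4]), 2)
# gives |1-2| -> 1 and |4-2| -> 3, i.e. 4 fuel in total.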
| python |
from pycantonese import stop_words
_DEFAULT_STOP_WORDS = stop_words()
def test_stop_words():
_stop_words = stop_words()
assert "唔" in _stop_words
def test_stop_words_add_one_word():
_stop_words = stop_words(add="foobar")
assert "foobar" in _stop_words
assert len(_stop_words) - len(_DEFAULT_STOP_WORDS) == 1
def test_stop_words_remove_one_word():
_stop_words = stop_words(remove="唔")
assert "唔" not in _stop_words
assert len(_DEFAULT_STOP_WORDS) - len(_stop_words) == 1
def test_stop_words_add_multiple_words():
_stop_words = stop_words(add=["foo", "bar", "baz"])
assert {"foo", "bar", "baz"}.issubset(_stop_words)
assert len(_stop_words) - len(_DEFAULT_STOP_WORDS) == 3
def test_stop_words_remove_multiple_words():
_stop_words = stop_words(remove=["唔", "乜嘢", "其他"])
assert not {"唔", "乜嘢", "其他"}.issubset(_stop_words)
assert len(_DEFAULT_STOP_WORDS) - len(_stop_words) == 3
| python |
#!/usr/bin/env python
from __future__ import print_function
import roslib
import rospy
import numpy as np
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2 as cv
print(cv.__version__)
class image_converter:
def __init__(self):
self.image_pub = rospy.Publisher("image_bgr8", Image, queue_size=10)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/xtion/color/image_raw",Image,self.callback)
def callback(self,data):
try:
img = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
(rows,cols,channels) = img.shape
if cols > 60 and rows > 60 :
cv.circle(img, (50, 50), 10, (0, 0, 255), 2)
blue = np.mat(img[:, :, 0])
green = np.mat(img[:, :, 1])
red = np.mat(img[:, :, 2])
blue_only = np.int16(blue) - np.int16(red) - np.int16(green)
blue_only[blue_only < 0] = 0
blue_only[blue_only >= 255] = 255
blue_only = np.uint8(blue_only)
kernel = np.ones((5, 5), np.uint8)
imgCanny = cv.Canny(blue_only, 100, 150) # edge detection
imgDilation = cv.dilate(imgCanny, kernel, iterations=1)
imgEroded = cv.erode(imgDilation, kernel, iterations=1)
im2,contours, hierarchy = cv.findContours(imgEroded, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
cnt = max(contours, key=len)
x, y, w, h = cv.boundingRect(cnt)
rect = cv.minAreaRect(cnt)
box = cv.boxPoints(rect)
box = np.int0(box)
ang = rect[2]
ang = np.abs(ang)
print("ang_before", ang)
leftmost = tuple(cnt[cnt[:, :, 0].argmin()][0])
rightmost = tuple(cnt[cnt[:, :, 0].argmax()][0])
topmost = tuple(cnt[cnt[:, :, 1].argmin()][0])
bottommost = tuple(cnt[cnt[:, :, 1].argmax()][0])
if ang < 8 or ang > 82:
if w > h:
x1 = x
y1 = y + np.uint8(h / 2)
x2 = x + w
y2 = y1
print("horizontal")
else:
y1 = y
x1 = x + np.uint8(w / 2)
x2 = x1
y2 = y + h
print("vertical")
else:
if 10 < ang < 80:
if rightmost[1] - leftmost[1] >= 20:
x1 = np.int0((leftmost[0] + topmost[0]) / 2)
y1 = np.int0((leftmost[1] + topmost[1]) / 2)
x2 = np.int0((rightmost[0] + bottommost[0]) / 2)
y2 = np.int0((rightmost[1] + bottommost[1]) / 2)
print("left up")
else:
if rightmost[0] > bottommost[0]:
x2 = np.int0((rightmost[0] + bottommost[0]) / 2)
y2 = np.int0((rightmost[1] + bottommost[1]) / 2)
x1 = np.int0((leftmost[0] + topmost[0]) / 2)
y1 = np.int0((leftmost[1] + topmost[1]) / 2)
print("right up 1")
else:
x1 = np.int0((rightmost[0] + topmost[0]) / 2)
y1 = np.int0((rightmost[1] + topmost[1]) / 2)
x2 = np.int0((leftmost[0] + bottommost[0]) / 2)
y2 = np.int0((leftmost[1] + bottommost[1]) / 2)
print("right up 2")
else:
if w > h:
x1 = x
y1 = y + np.uint8(h / 2)
x2 = x + w
y2 = y1
print("horizontal 2")
else:
y1 = y
x1 = x + np.uint8(w / 2)
x2 = x1
y2 = y + h
print("vertical 2")
print("ang", ang)
print("leftmost:", leftmost, "rightmost:", rightmost, "topmost:", topmost, "bottommost:", bottommost, "\n")
print("x1, y1",x1,y1 ,"x2,y2", x2,y2)
print("box", box)
# cv.drawContours(imgEroded, [box], 0, (255, 0, 255), 2)
# print("x:", x, "y:", y, "w:", w, "h:", h, "\n")
#
#
cv.circle(imgEroded, (x1, y1), 10, (255, 0, 0), 2)
cv.circle(imgEroded, (x2, y2), 10, (255, 0, 0), 2)
# cv.drawContours(img, contours, 0, (255, 255, 0), 5)
cv.drawContours(img, cnt, -1, (0, 255, 255), 5)
# cv.imshow("Orig", img)
cv.imshow("Eroded Image", imgEroded)
cv.waitKey(1)
try:
self.image_pub.publish(self.bridge.cv2_to_imgmsg(img, "bgr8"))
except CvBridgeError as e:
print(e)
def main(args):
ic = image_converter()
rospy.init_node('image_converter', anonymous=True)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
| python |
import numpy as np
from whole_body_mpc_msgs.msg import StateFeedbackGain
import copy
class StateFeedbackGainInterface():
def __init__(self, nx, nu, frame_id="world"):
self._msg = StateFeedbackGain()
self._msg.header.frame_id = frame_id
self._msg.nx = nx
self._msg.nu = nu
self._msg.data = [None] * (nx * nu)
self._K = np.zeros([nu, nx])
def writeToMessage(self, K):
if K.shape[0] != self._msg.nu:
print("Couldn't convert the state feedback gain into a message since nu is not consistent")
return
if K.shape[1] != self._msg.nx:
print("Couldn't convert the state feedback gain into a message since nx is not consistent")
return
for i in range(self._msg.nu):
for j in range(self._msg.nx):
self._msg.data[i * self._msg.nx + j] = K[i, j]
return copy.deepcopy(self._msg)
def writeFromMessage(self, msg):
if msg.nu != self._K.shape[0]:
print("Couldn't convert the message into a state feedback gain into since nu is not consistent")
return
if msg.nx != self._K.shape[1]:
print("Couldn't convert the message into a state feedback gain since nx is not consistent")
return
for i in range(msg.nu):
for j in range(msg.nx):
self._K[i, j] = msg.data[i * msg.nx + j]
return copy.deepcopy(self._K)
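# Hedged round-trip sketch (illustration only; kept commented out so importing
# this module stays side-effect free). Note that writeToMessage expects K with
# shape (nu, nx):
#   iface = StateFeedbackGainInterface(nx=4, nu=2, frame_id="world")
#   msg = iface.writeToMessage(np.ones((2, 4)))
#   K = iface.writeFromMessage(msg)   # recovers the same 2x4 gain matrix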
| python |
""" This script runs every 10 seconds and assigns users to a new batch of tasks filtered by the specified column.
Notes:
1. Don't forget to enable Manual mode in Annotation settings
2. Be careful when adding email users: users who are not members of the project or workspace will break Data Manager
Install:
git clone https://github.com/heartexlabs/label-studio-sdk.git
cd label-studio-sdk
pip install -e .
python examples/label_studio_enterprise/assigner.py
Demo video:
https://www.youtube.com/watch?v=IeqrsCYYQ8k
"""
import time
import math
import label_studio_sdk
from label_studio_sdk.data_manager import Filters, Column, Operator, Type
class BatchAssigner:
def __init__(self, host, api_key, project_id):
self.ls = label_studio_sdk.Client(url=host, api_key=api_key)
self.project = self.ls.get_project(id=project_id)
def get_tasks(self, filter_column, filter_value, page, page_size):
""" Get tasks with filter by column and page number
"""
filters = Filters.create(Filters.OR, [
Filters.item(
Column.data(filter_column),
Operator.EQUAL,
Type.String,
Filters.value(filter_value)
)
])
return self.project.get_paginated_tasks(filters=filters, page=page, page_size=page_size, only_ids=True)
def get_page_total(self, filter_column, filter_value, page_size):
""" Total page number for tasks with filter by column and specified page size
"""
result = self.get_tasks(filter_column, filter_value, 1, page_size)
return math.ceil(result['total'] / float(page_size))
def get_user_ids(self, emails):
""" Get user IDs by email and preserve the order
:param emails: list of strings with email addresses
:return: user IDs in the same order as email addresses
"""
# get all users
user_ids = []
users = self.ls.get_users()
for email in emails:
for user in users:
if email == user.email:
print(user.email, '=>', user.id)
user_ids.append(user.id)
break
return user_ids
def assign_users_to_tasks(self,
user_ids,
filter_column='organization',
filter_value='name',
page=1,
page_size=100):
""" Assign annotators to filter by specified column and paginated tasks
:param user_ids: list of user email addresses
:param filter_column: str with data column name from Data Manager
:param filter_value: str with data value to filter as equal
:param page: current page
:param page_size: task number
:return: True if success else False or exception
"""
result = self.get_tasks(filter_column, filter_value, page, page_size)
task_ids = result['tasks']
if not task_ids:
print(f'No tasks found')
return False
# call assign API
body = {
"type": "AN",
"users": user_ids,
"selectedItems": {
"all": False,
"included": task_ids
}
}
self.ls.make_request('post', f'/api/projects/{self.project.id}/tasks/assignees', json=body)
print(f'Users {user_ids} were assigned to {len(task_ids)} tasks '
f'from id={task_ids[0]} to id={task_ids[-1]}')
return True
def start():
host = 'http://localhost:8000'
api_key = 'e0b7751e84a059b0accaf14392e5e9fd4abe3de7'
project_id = 182
filter_column = 'shortname'
filter_value = 'opossum'
page_size = 10
emails = ['[email protected]', '[email protected]']
assigner = BatchAssigner(host, api_key, project_id)
# Be careful when using email users:
# users who are not members of the project or workspace will break Data Manager
user_ids = assigner.get_user_ids(emails=emails)
page_total = assigner.get_page_total(filter_column, filter_value, page_size)
print(f'Total pages for {filter_column}={filter_value} => {page_total}')
for current_page in range(1, page_total+1):
assigner.assign_users_to_tasks(
filter_column=filter_column,
filter_value=filter_value,
user_ids=user_ids,
page=current_page,
page_size=page_size
)
time.sleep(10)
if __name__ == '__main__':
start()
| python |
import time
import requests
from requests.exceptions import HTTPError, Timeout
from bs4 import BeautifulSoup
from core.log_manager import logger
class Updates:
MAX_LENGTH = 25  # Number of digits a version string is zero-padded to before comparison
TIME_INTERVAL = 48 # In hours
def __init__(self, link, local_version):
self.raw_local_version = str(local_version)
self.url = link if link[-1] == "/" else link + "/" # IMPORTANT: the url must contain a slash at the end
def get_remote_version(self):
"""Gets the last version of the remote AutomatiK repository."""
try:
req = requests.get(self.url) # Gets the HTML code from the web page
except (HTTPError, Timeout, requests.exceptions.ConnectionError):
logger.error("Version request to GitHub failed")
return False
soup = BeautifulSoup(req.content, "html.parser")
try:
remote_version = soup.find("span", # Type of container
{"class": "css-truncate-target"}, # Additional attrs
recursive=True).text # Parameters of the search
except AttributeError:
logger.error("Version parsing from GitHub failed")
return False
return remote_version
def convert(self, raw_remote_version):
"""Converts the complex syntax of a version to an integer."""
if not raw_remote_version:
return False
local_version = "".join([x for x in self.raw_local_version if x.isdigit()])
local_version += "0" * (Updates.MAX_LENGTH - len(local_version))
remote_version = "".join([x for x in raw_remote_version if x.isdigit()])
remote_version += "0" * (Updates.MAX_LENGTH - len(remote_version))
# If the number of 25 digits of the remote version is higher, then It is a newer one
if int(remote_version) > int(local_version):
logger.info(f"New update ({raw_remote_version}) available at {self.url + raw_remote_version}")
return {"remote": int(remote_version), "local": int(local_version)}
def start_checking(self):
"""Starts looking for new version every X hours."""
while True:
self.convert(self.get_remote_version())
time.sleep(Updates.TIME_INTERVAL * 3600)
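# Hedged usage sketch (URL and version string are illustrative only):
#   updater = Updates("https://github.com/<owner>/AutomatiK/releases/latest", "v1.2.3")
#   updater.start_checking()  # blocks, re-checking every TIME_INTERVAL hours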
| python |
import numpy
from typing import List
from skipi.function import Function
class AverageFunction(Function):
@classmethod
def from_functions(cls, functions: List[Function], domain=None):
r"""
Returns the average function based on the functions given as a list F = [f_1, ..., f_n]
::math..
f_avg(x) = 1/n * (f_1(x) + \ldots + f_n(x))
where f_i is an element of F
:param functions: List of functions to average
:return:
"""
n = len(functions)
if n == 0:
raise RuntimeError("Cannot average functions if no function was given")
if n == 1:
return functions[0]
if domain is None:
domain = functions[0].get_domain()
# sum of axis=0, since x might be a vector containing multiple evaluation points
return cls(domain, lambda x: numpy.sum([f(x) for f in functions], axis=0) / n)
class ComputeAverage(Function):
@classmethod
def from_functions(cls, functions: [Function], domain=None, avg_fun=None):
if domain is None:
domain = functions[0].get_domain()
if avg_fun is None:
avg_fun = cls.avg
return Function.to_function(domain, lambda x: avg_fun([f(x) for f in functions]))
@staticmethod
def avg(numbers):
numbers = numpy.array(numbers)
return numpy.average(numbers.real) + 1j * numpy.average(numbers.imag)
class DrawFromFunction(Function):
@classmethod
def from_function(cls, function: Function):
dy = function.dy
if dy is None:
return function
value = numpy.random.normal(function.eval().real, dy.eval().real)
if function.is_complex():
value = value + 1j * numpy.random.normal(function.eval().imag, dy.eval().imag)
return Function.to_function(function.get_dom(), value)
class ComputeStandardDeviation(Function):
@classmethod
def from_functions(cls, functions: [Function], domain=None, std_fun=None):
"""
Computes the standard deviation (pointwise) using all functions
If domain is None, the domain from the first function will be used
If std_fun is None, the "complex" standard deviation will be used, see the method cstd.
:param functions: A list of functions from which the std should be calculated
:param domain: A domain
:param std_fun: A function calculating the std
:return: new Function
"""
if domain is None:
domain = functions[0].get_domain()
if std_fun is None:
std_fun = cls.cstd
return Function.to_function(domain, lambda x: std_fun([f(x) for f in functions]))
@staticmethod
def cstd(complexs):
"""
Calculates the standard deviation of a complex number by splitting it into the real and imaginary
part, resulting in a complex standard deviation:
cstd(complex) = std(complex.real) + 1j*std(complex.imag).
:param complexs:
:return:
"""
complexs = numpy.array(complexs)
return numpy.std(complexs.real) + 1j * numpy.std(complexs.imag)
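# Hedged example (illustration only): real and imaginary parts are treated
# separately, so ComputeStandardDeviation.cstd([1 + 1j, 3 + 5j]) == 1 + 2j
# (population std of [1, 3] is 1, of [1, 5] is 2).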
class MaxOfFunctions(Function):
@classmethod
def from_functions(cls, functions: [Function]):
return Function.to_function(functions[0].get_dom(), lambda x: numpy.max([f(x) for f in functions]))
class MinOfFunctions(Function):
@classmethod
def from_functions(cls, functions: [Function]):
return Function.to_function(functions[0].get_dom(), lambda x: numpy.min([f(x) for f in functions]))
| python |
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
def getBooks(soup):
siteURL = 'http://www.thelatinlibrary.com'
textsURL = []
# get links to books in the collection
for a in soup.find_all('a', href=True):
link = a['href']
textsURL.append("{}/{}".format(siteURL, a['href']))
# remove unnecessary URLs
while ("http://www.thelatinlibrary.com//index.html" in textsURL):
textsURL.remove("http://www.thelatinlibrary.com//index.html")
textsURL.remove("http://www.thelatinlibrary.com//classics.html")
textsURL.remove("http://www.thelatinlibrary.com//christian")
logger.info("\n".join(textsURL))
return textsURL
def getSermons(soup):
siteURL = 'http://www.thelatinlibrary.com/augustine'
textsURL = []
# get links to books in the collection
for a in soup.find_all('a', href=True):
link = a['href']
textsURL.append("{}/{}".format(siteURL, a['href']))
# remove unnecessary URLs
while ("http://www.thelatinlibrary.com/augustine//index.html" in textsURL):
textsURL.remove("http://www.thelatinlibrary.com/augustine//index.html")
textsURL.remove("http://www.thelatinlibrary.com/augustine//classics.html")
textsURL.remove("http://www.thelatinlibrary.com/augustine//christian")
textsURL.remove("http://www.thelatinlibrary.com/augustine//august.html")
logger.info("\n".join(textsURL))
return textsURL
def main():
# The collection URL below.
collURL = 'http://www.thelatinlibrary.com/august.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = collSOUP.title.string.strip()
colltitle = "AUGUSTINE OF HIPPO"
date = collSOUP.span.string.strip().replace('(', '').replace(')', '').replace(u"\u2013", '-')
textsURL = getBooks(collSOUP)
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Augustine'")
for url in textsURL:
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
if url.startswith("http://www.thelatinlibrary.com/augustine/iulianus1.shtml"):
title = "CONTRA SECUNDAM IULIANI RESPONSIONEM LIBER PRIMUS"
elif url.startswith("http://www.thelatinlibrary.com/augustine/iulianus2.shtml"):
title = "CONTRA SECUNDAM IULIANI RESPONSIONEM LIBER SECUNDUS"
else:
try:
title = textsoup.title.string.split(':')[1].strip()
except:
try:
title = textsoup.title.string.split(',')[1].strip()
except:
title = textsoup.find('p', class_='pagehead').string.strip()
print(title)
if title.startswith("Confessions"):
getp = textsoup.find_all('p')
chapter = 0
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
'internal_navigation', 'citation']: # these are not part of the main t
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
if re.match("[0-9]+", pstring):
if " " in pstring:
heading = pstring.split(" ")[0]
pstring = pstring.split(" ")[1]
chapter = heading.split(".")[1].strip()
verse = heading.split(".")[2].strip()
else:
chapter = pstring.split(".")[1].strip()
verse = pstring.split(".")[2].strip()
continue
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v.startswith('commentary'):
# ignore an english note in there
continue
if v is None or v == '' or v.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("SANCTI AUGUSTINI EPISTULA"):
getp = textsoup.find_all('p')
chapter = 0
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
'internal_navigation', 'citation']: # these are not part of the main t
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
# verse number assignment.
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("De Civitate Dei"):
getp = textsoup.find_all('p')
chapter = 0
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
'internal_navigation', 'citation']: # these are not part of the main t
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
if re.match("\[", pstring):
# this is a chapter heading
chapter = pstring.split("]")[0].replace("[", "").strip()
verse = 0
pstring = pstring.split("]")[1].strip()
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("de Trinitate"):
getp = textsoup.find_all('p')
chapter = "PROLOGUS"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
'internal_navigation', 'citation']: # these are not part of the main t
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
if p.find('b') is not None:
continue
# these headings are handled elsewhere
if re.match("\[", pstring):
# this is a heading
heading = pstring.split("]")[0].replace("[", "").strip()
if re.match("[IVXL]+", heading):
# this is a chapter and verse heading
try:
chapter = re.split(" ", heading)[0].strip()
verse = re.split(" ", heading)[1].strip()
except:
verse = heading
else:
verse = heading
pstring = pstring.split("]")[1].strip()
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("CONTRA SECUNDAM IULIANI RESPONSIONEM"):
getp = textsoup.find_all('p')
chapter = "PRAEFATIO"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
'internal_navigation', 'citation']: # these are not part of the main t
continue
except:
pass
verses = []
pstring = p.get_text()
# does this leave numbers in the text from footnote links?
pstring = pstring.strip()
if p.find('br') is not None:
# skip footnotes - not sure about this?
continue
# used bolded headings as chapters
# left numbers in text
# can be changed if neccesary
if p.find('b') is not None:
if pstring.startswith("PRAEFATIO") or pstring.startswith("LIBER"):
continue
# these headings are handled elsewhere
else:
chapter = pstring
verse = 0
continue
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("de Dialectica"):
getp = textsoup.find_all('p')
chapter = 0
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
'internal_navigation', 'citation']: # these are not part of the main t
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
if re.match("[IVXL]+", pstring):
# this is a chapter heading
chapter = pstring.split(".")[0].strip()
verse = 0
pstring = pstring.split(".")[1].strip()
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("de Fide"):
# verses are split across chapter headings, so they get double entered
# e.g. there are two verse 21s, one in Caput IX and one in Caput X
getp = textsoup.find_all('p')
chapter = "-1"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
'internal_navigation', 'citation']: # these are not part of the main t
continue
except:
pass
pstring = p.get_text()
pstring = pstring.strip()
if p.find('b') is not None:
chapter = pstring
continue
lines = re.split("([0-9]+\.)", pstring)
for l in lines:
if re.match("[0-9]", l):
verse += 1
continue
if l.startswith('Augustine'):
continue
if l is None or l == '' or l.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, l.strip(), url, 'prose'))
elif title.startswith("de Catechizandis"):
getp = textsoup.find_all('p')
chapter = "-1"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
'internal_navigation', 'citation']: # these are not part of the main t
continue
except:
pass
pstring = p.get_text()
pstring = pstring.strip()
if p.find('b') is not None:
chapter = p.find('b').string.strip()
pstring = pstring.replace(chapter, "").strip()
lines = re.split("([0-9]+\.)", pstring)
for l in lines:
if re.match("[0-9]", l):
verse += 1
continue
if l.startswith('Augustine'):
continue
if l is None or l == '' or l.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, l.strip(), url, 'prose'))
elif title.startswith("REGULA SANCTI AUGUSTINI"):
getp = textsoup.find_all('p')
chapter = "-1"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
'internal_navigation', 'citation']: # these are not part of the main t
continue
except:
pass
pstring = p.get_text()
pstring = pstring.strip()
if p.find('b') is not None:
chapter = pstring
continue
lines = re.split("([0-9]+\.)", pstring)
for l in lines:
if re.match("[0-9]", l):
verse += 1
continue
if l.startswith('Augustine'):
continue
if l is None or l == '' or l.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, l.strip(), url, 'prose'))
else:
sermons = getSermons(textsoup)
# these are the Sermons, which have their own page of links
for s in sermons:
sermonurl = urllib.request.urlopen(s)
sermonsoup = BeautifulSoup(sermonurl, 'html5lib')
title = sermonsoup.title.string.split(':')[1].strip()
print(title)
getp = sermonsoup.find_all('p')
chapter = "-1"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
'internal_navigation', 'citation']: # these are not part of the main t
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
# verse number assignment.
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
if __name__ == '__main__':
main()
| python |
import scipy.integrate as scin
import numpy as np
import matplotlib.pyplot as pl
g=9.80665
Cd=0.2028
m=80
ics = [0.0, 0.0]  # initial conditions: y(0) = 0, y'(0) = 0
t = np.linspace(0, 100, 500)  # integration grid: 500 points from 0 to 100 inclusive
def deriv(x,t):
F = np.zeros(2) #creates an array F, with length 2 thats filled with 0's
F[0]=x[1] #dy/dt = y'
F[1]=g-((Cd/m)*((x[1])**2)) #d2y/dt2 = g-((cd/m) * (y'^2))
return F
sol_1 = scin.odeint(deriv,ics,t) #odeint outputs array --> [y solution, dy/dt solution]
x_1 = sol_1[:,0] #(every value in) the y solution column
y_1 = sol_1[:,1] #(every value in) the dy/dt solution column
print(sol_1)
#print(x_1)
#print(y_1)
pl.figure(1)
pl.plot(t,x_1,'r-')
pl.xlabel('Time(s)')
pl.ylabel('y')
pl.show()
pl.figure(2)
pl.plot(t,y_1,'r-')
pl.xlabel('Time(s)')
pl.ylabel('y')
pl.show()
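# Hedged cross-check (not in the original script): setting y'' = 0 in
# y'' = g - (Cd/m)*y'^2 gives the terminal velocity v_t = sqrt(g*m/Cd),
# which is the plateau that the velocity plot (figure 2) should approach.
print("Terminal velocity (analytic): %.1f m/s" % ((g * m / Cd) ** 0.5))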
| python |
# -*- coding: utf-8 -*-
"""
wakatime.projects.projectmap
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use the ~/.wakatime.cfg file to set custom project names by
recursively matching folder paths.
Project maps go under the [projectmap] config section.
For example:
[projectmap]
/home/user/projects/foo = new project name
/home/user/projects/bar = project2
Will result in file `/home/user/projects/foo/src/main.c` to have
project name `new project name`.
:copyright: (c) 2013 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
from .base import BaseProject
log = logging.getLogger(__name__)
# str is unicode in Python3
try:
unicode
except NameError:
unicode = str
class ProjectMap(BaseProject):
def process(self):
if not self._configs:
return False
self.project = self._find_project(self.path)
return self.project is not None
def _find_project(self, path):
path = os.path.realpath(path)
if os.path.isfile(path):
path = os.path.split(path)[0]
if self._configs.get(path.lower()):
return self._configs.get(path.lower())
if self._configs.get('%s/' % path.lower()):
return self._configs.get('%s/' % path.lower())
if self._configs.get('%s\\' % path.lower()):
return self._configs.get('%s\\' % path.lower())
split_path = os.path.split(path)
if split_path[1] == '':
return None
return self._find_project(split_path[0])
def branch(self):
return None
def name(self):
if self.project:
return unicode(self.project)
return None
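# Hedged walk-through (paths are illustrative, mirroring the docstring above):
# with a [projectmap] entry
#     /home/user/projects/foo = new project name
# _find_project('/home/user/projects/foo/src/main.c') drops the filename, then
# walks upward (.../foo/src -> .../foo) until the lower-cased path matches a
# configured key and returns 'new project name'.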
| python |
__all__ = ['features', 'graph_layers']
| python |
# Author: Steven C. Dang
# Class for most common operations with TA2
import logging
import grpc
import json
from os import path
from google.protobuf.json_format import MessageToJson
import pandas as pd
# D3M TA2 API imports
from .api_v3 import core_pb2, core_pb2_grpc
from .api_v3 import value_pb2
from .api_v3 import problem_pb2
from ls_problem_desc.d3m_problem import *
from modeling.models import *
logger = logging.getLogger(__name__)
class TA2Client(object):
"""
A client for common interactions with a TA2 system
"""
__name__ = "CMU Tigris TA3 v2.0"
__version__ = "v2018.5.1"
__protocol_version__ = core_pb2.DESCRIPTOR.GetOptions().Extensions[core_pb2.protocol_version]
__allowed_values__ = [value_pb2.RAW, value_pb2.DATASET_URI, value_pb2.CSV_URI]
def __init__(self, addr, debug=False, out_dir=None, name=None):
logger.info("Initializing TA2 Client with address: %s" % addr)
self.addr = addr
self.debug = debug
if debug and out_dir is not None:
self.out_dir = out_dir
else:
self.out_dir = ""
self.name = name
channel = grpc.insecure_channel(addr)
self.serv = core_pb2_grpc.CoreStub(channel)
logger.debug("Connected to server")
# Send hello and parse response for ta2 defaults
msg = core_pb2.HelloRequest()
logger.debug("Formed hello request: %s" % str(msg))
reply = self.serv.Hello(msg)
logger.debug("Got Response to hello request: %s" % str(reply))
self.user_agent = reply.user_agent
self.version = reply.version
self.supported_extensions = reply.supported_extensions
self.allowed_values = reply.allowed_value_types
logger.info("Connected to TA2 System, %s, using api version, %s" % (self.user_agent, self.version))
logger.debug("TA2 allowed values: %s" % str(self.allowed_values))
logger.debug("TA2 supported extensions: %s" % str(self.supported_extensions))
self.search_solution_requests = {}
self.fitted_solution_requests = {}
self.produce_solution_requests = {}
def get_id(self):
return "%s-%s" % (self.__name__, self.__version__)
def write_msg_to_file(self, msg, file_name):
"""
Write a given message to file
"""
with open(path.join(self.out_dir, file_name), 'w') as out_file:
out_file.write(MessageToJson(msg))
def hello(self):
"""
Ping the TA2 server and return the result
"""
logger.info("Sending Hello to TA2 server, %s, at: %s" % (self.user_agent, self.addr))
msg = core_pb2.HelloRequest()
logger.debug("Formed hello request: %s" % str(msg))
if self.debug:
self.write_msg_to_file(msg, 'hello_request.json')
reply = self.serv.Hello(msg)
logger.debug("Got Response to hello request: %s" % str(reply))
if self.debug:
self.write_msg_to_file(reply, 'hello_response.json')
return reply
def search_solutions(self, prob, dataset, inputs=None, pipeline=None, max_time=0, priority=0,
get_request=False):
"""
Initiate a solution search request
"""
if type(prob) == GRPCProblemDesc:
logger.debug("searching with proper GRPC problem description")
p = prob
else:
logger.debug("Converting %s to GRPC problem desc" % str(type(prob)))
p = GRPCProblemDesc.from_problem_desc(prob)
msg = core_pb2.SearchSolutionsRequest(
user_agent = self.__name__,
version = self.__protocol_version__,
allowed_value_types = self.__allowed_values__,
time_bound = max_time,
priority = priority,
problem = p.to_protobuf(),
)
if pipeline is not None:
msg.template = pipeline
# else:
# #Produce a pipeline with only a placeholder
# pipe = pipeline_pb2.PipelineDescription()
# pipe.source = self.get_id()
# pipe.context = pipeline_pb2.TESTING
# out = pipe.outputs.add()
# Add inputs if given
if inputs is None:
i = msg.inputs.add()
i.dataset_uri = dataset.get_schema_uri()
else:
for inpt in inputs:
i = msg.inputs.add()
# For now force it into a string until type checking is implemented
i.string = str(inpt)
# logger.debug("################################")
# logger.debug("Sending msg: %s" % str(msg))
# for ip in msg.inputs:
# logger.debug("Got file uri: %s" % ip)
# logger.debug("Got file uri: %s" % ip.dataset_uri)
if self.debug:
self.write_msg_to_file(msg, 'search_request.json')
logger.debug("Sending Search Solution request: %s" % str(msg))
reply = self.serv.SearchSolutions(msg)
if self.debug:
self.write_msg_to_file(reply, 'search_reply.json')
# Queue the msg for tracking
self.search_solution_requests[reply.search_id] = msg
if get_request:
return reply.search_id, msg
else:
return reply.search_id
def get_search_solutions_results(self, sid):
logger.info("Geting Search Solution request results for search id: %s" % sid)
msg = core_pb2.GetSearchSolutionsResultsRequest(
search_id = sid
)
soln_ids = set()
for reply in self.serv.GetSearchSolutionsResults(msg):
logger.debug("Got message: %s" % str(reply))
if reply.solution_id:
logger.debug("Got a message with a solution id: %s" % reply.solution_id)
soln_ids.add(reply.solution_id)
if reply.progress.state == core_pb2.PENDING:
logger.debug("Search is still pending and hasn't begin")
elif reply.progress.state == core_pb2.RUNNING:
logger.debug("Search is currently running and has not completed: %s" % reply.progress.status)
elif reply.progress.state == core_pb2.COMPLETED:
logger.info("Search has completed successfully: %s" % reply.progress.status)
elif reply.progress.state == core_pb2.ERRORED:
logger.error("Search has completed in an error state: %s" % reply.progress.status)
raise Exception("Search Solution returned in error: %s" % reply.progress.status)
else:
logger.warning("Search is in an unknown state: %s" % str(reply.progress))
if len(soln_ids) == 0:
return None
else:
return list(soln_ids)
def end_search_solutions(self, sid):
msg = core_pb2.EndSearchSolutionsRequest(search_id=sid)
reply = self.serv.EndSearchSolutions(msg)
if sid in self.search_solution_requests:
try:
del self.search_solution_requests[sid]
except KeyError:
logger.warning("Can't find search with ID, %s, to end search" % sid)
else:
logger.warning("Search solution request ID not found. May already have removed this: %s" % sid)
logger.info("Ended Search for solutions")
def stop_search_solutions(self, sid):
msg = core_pb2.StopSearchSolutionsRequest(search_id=sid)
reply = self.serv.StopSearchSolutions(msg)
logger.info("Stopped Search for solutions")
def get_default_scoring_config(self):
cfg = core_pb2.ScoringConfiguration(
method = core_pb2.K_FOLD,
folds = 10,
train_test_ratio = 5,
shuffle = True
)
return cfg
def describe_solution(self, sid):
logger.info("Requesting description of solution with id: %s" % sid)
msg = core_pb2.DescribeSolutionRequest(
solution_id = sid
)
if self.debug:
self.write_msg_to_file(msg, 'describe_solution_request.json')
reply = self.serv.DescribeSolution(msg)
if self.debug:
self.write_msg_to_file(msg, 'describe_solution_reply.json')
logger.debug("Got describe solution reply: %s" % str(reply))
model = Model(sid)
model.add_description_from_protobuf(reply.pipeline)
return model
def score_solution(self, sln, dataset, inputs=None, metrics=None):
logger.info("Requesting to score solution with id: %s" % sln.id)
msg = core_pb2.ScoreSolutionRequest(
solution_id=sln.id,
configuration=self.get_default_scoring_config()
)
# Add inputs if given
if inputs is None:
i = msg.inputs.add()
i.dataset_uri = dataset.get_schema_uri()
else:
for inpt in inputs:
i = msg.inputs.add()
# For now force it into a string until type checking is implemented
i.string = str(inpt)
# Add metrics if given
if metrics is None:
m = msg.performance_metrics.add()
m.metric = problem_pb2.ACCURACY
else:
for met in metrics:
metric = met.to_protobuf()
m = msg.performance_metrics.add()
m.metric = metric.metric
if self.debug:
self.write_msg_to_file(msg, 'score_solution_request.json')
logger.debug("Sending Score solution request: \n%s" % str(msg))
reply = self.serv.ScoreSolution(msg)
if self.debug:
self.write_msg_to_file(reply, 'score_solution_reply.json')
return reply.request_id
def get_score_solution_results(self, rid):
logger.info("Getting Score Solution Results with request id: %s" % rid)
msg = core_pb2.GetScoreSolutionResultsRequest(
request_id = rid
)
soln_scores = []
for reply in self.serv.GetScoreSolutionResults(msg):
if reply.progress.state == core_pb2.PENDING:
logger.debug("Scoring solution is still pending and hasn't begin")
elif reply.progress.state == core_pb2.RUNNING:
logger.debug("Scoring solution is currently running and has not completed: %s" % reply.progress.status)
elif reply.progress.state == core_pb2.COMPLETED:
logger.info("Scoring solution has completed successfully: %s" % reply.progress.status)
if self.debug:
self.write_msg_to_file(reply, 'score_solution_result_reply.json')
return reply.scores
elif reply.progress.state == core_pb2.ERRORED:
logger.error("Scoring solution has completed in an error state: %s" % reply.progress.status)
if self.debug:
self.write_msg_to_file(reply, 'score_solution_result_reply.json')
else:
logger.warning("Scoring solution is in an unknown state: %s" % str(reply.progress))
logger.debug("Returned %i scores-sets" % len(soln_scores))
for soln in soln_scores:
logger.debug("Score solution received: %s" % str(soln))
return soln_scores
def fit_solution(self, soln, dataset, inputs=None, outputs=None):
logger.info("Fitting solution with id: %s\t on dataset at: %s" %
(soln.id, dataset.get_schema_uri())
)
msg = core_pb2.FitSolutionRequest(
solution_id = soln.id,
)
# Add inputs if given
if inputs is None:
i = msg.inputs.add()
i.dataset_uri = dataset.get_schema_uri()
else:
for inpt in inputs:
i = msg.inputs.add()
# For now force it into a string until type checking is implemented
i.string = str(inpt)
# Add list of outputs to expose
if outputs is None:
if 'mit' in self.name:
logger.debug("Using pipeline format 'describe'")
msg.expose_outputs.extend([soln.get_default_output(format='describe')])
else:
logger.debug("Using pipeline format 'name'")
msg.expose_outputs.extend([soln.get_default_output(format='name')])
allowed_vals = [val for val in self.allowed_values if val in self.__allowed_values__]
msg.expose_value_types.extend(allowed_vals)
if self.debug:
with open(os.path.join(self.out_dir, 'model.json'), 'w') as model_file:
model_file.write(json.dumps(soln.to_dict()))
self.write_msg_to_file(msg, 'fit_solution_request.json')
logger.debug("Sending Fit request msg: %s" % str(msg))
reply = self.serv.FitSolution(msg)
if self.debug:
self.write_msg_to_file(reply, 'fit_solution_reply.json')
self.fitted_solution_requests[reply.request_id] = msg
return reply.request_id
def get_fit_solution_results(self, rid):
logger.info("Getting fit solution results for request with id: %s" % rid)
msg = core_pb2.GetFitSolutionResultsRequest(
request_id = rid
)
results = None
for reply in self.serv.GetFitSolutionResults(msg):
if reply.progress.state == core_pb2.PENDING:
logger.debug("Fitting model to solution is still pending and hasn't begin")
elif reply.progress.state == core_pb2.RUNNING:
logger.debug("Fitting model to solution is currently running and has not completed: %s" % reply.progress.status)
elif reply.progress.state == core_pb2.COMPLETED:
logger.info("Fitting model to solution has completed successfully: %s" % reply.progress.status)
# logger.debug("Got reply: %s" % str(reply))
results = reply
elif reply.progress.state == core_pb2.ERRORED:
logger.error("Fitting model to solution has completed in an error state: %s" % reply.progress.status)
else:
logger.warning("Fittin model to solution is in an unknown state: %s" % str(reply.progress))
        request = self.fitted_solution_requests.pop(rid, None)
        # Guard against the case where no 'predictions' output with a csv_uri is exposed
        result_data = None
        for i in results.exposed_outputs:
if i == 'predictions':
logger.debug(results.exposed_outputs[i])
if results.exposed_outputs[i].HasField("csv_uri"):
# logger.debug(results.exposed_outputs[i].csv_uri)
result_data = pd.read_csv(results.exposed_outputs[i].csv_uri)
# logger.debug(result_data.head())
else:
logger.debug("Output label: %s" % i)
# logger.debug(results.exposed_outputs[value_pb2.CSV_URI])
return results.fitted_solution_id, result_data
def produce_solution(self, fsid, soln, ds, inputs=None, outputs=None):
logger.info("Produce predictions for solution with id: %s" % soln.id)
msg = core_pb2.ProduceSolutionRequest(
fitted_solution_id = fsid
)
# Add inputs if given
if inputs is None:
i = msg.inputs.add()
i.dataset_uri = ds.get_schema_uri()
else:
for inpt in inputs:
i = msg.inputs.add()
# For now force it into a string until type checking is implemented
i.string = str(inpt)
# Add list of outputs to expose
if outputs is None:
if 'mit' in self.name:
logger.debug("Using pipeline format 'describe'")
msg.expose_outputs.extend([soln.get_default_output(format='describe')])
else:
logger.debug("Using pipeline format 'name'")
msg.expose_outputs.extend([soln.get_default_output(format='name')])
allowed_vals = [val for val in self.allowed_values if val in self.__allowed_values__]
msg.expose_value_types.extend(allowed_vals)
logger.info("****************************************")
msg_js = json_format.MessageToJson(msg)
logger.info("Sending produce solution with msg: %s" % msg_js)
logger.info("****************************************")
if self.debug:
self.write_msg_to_file(msg, 'produce_solution_msg.json')
reply = self.serv.ProduceSolution(msg)
if self.debug:
self.write_msg_to_file(reply, 'produce_solution_reply.json')
self.produce_solution_requests[reply.request_id] = msg
return reply.request_id
def get_produce_solution_results(self, req_id):
logger.info("Getting ProduceSolutionRequest results with id: %s" % req_id)
msg = core_pb2.GetProduceSolutionResultsRequest(
request_id = req_id
)
for reply in self.serv.GetProduceSolutionResults(msg):
            if reply.progress.state == core_pb2.PENDING:
                logger.debug("Producing solution results is still pending and has not begun")
            elif reply.progress.state == core_pb2.RUNNING:
                logger.debug("Producing solution results is currently running and has not completed: %s" % reply.progress.status)
            elif reply.progress.state == core_pb2.COMPLETED:
                logger.info("Producing solution results has completed successfully: %s" % reply.progress.status)
                return reply.exposed_outputs
            elif reply.progress.state == core_pb2.ERRORED:
                logger.error("Producing solution results has completed in an error state: %s" % reply.progress.status)
            else:
                logger.warning("Producing solution results is in an unknown state: %s" % str(reply.progress))
# logger.debug("Got %i completed responses" % len(replies))
# fitted_ids = [reply.fitted_solution_id for reply in replies]
        request = self.produce_solution_requests.pop(req_id, None)
def list_primitives(self):
logger.info("Getting list of TA2 primitives supported")
msg = core_pb2.ListPrimitivesRequest()
reply = self.serv.ListPrimitives(msg)
logger.debug("Got reply: %s" % str(reply))
return reply.primitives
def export_solution(self, model, fit_id, rank):
logger.info("Requesting export of solution with id; %s" % model.id)
if model.fitted_id != fit_id:
raise Exception("Model does not have a fit matching, %s\nAvailable fits: %s" %
(fit_id, model.fitted_id))
msg = core_pb2.SolutionExportRequest(
fitted_solution_id = fit_id,
rank = rank
)
if self.debug:
self.write_msg_to_file(msg, 'export_solution_request.json')
self.serv.SolutionExport(msg)
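    # --- Usage sketch (illustrative only, not part of the client) ---
    # A minimal end-to-end flow, assuming `client` is a connected instance of this
    # class, `dataset` wraps a dataset exposing get_schema_uri(), and `soln_ids` is
    # the list of solution ids returned by the search-results call above.
    #
    #   for sid in soln_ids or []:
    #       model = client.describe_solution(sid)
    #       score_req = client.score_solution(model, dataset)
    #       scores = client.get_score_solution_results(score_req)
    #       fit_req = client.fit_solution(model, dataset)
    #       fitted_id, predictions = client.get_fit_solution_results(fit_req)
    #       produce_req = client.produce_solution(fitted_id, model, dataset)
    #       outputs = client.get_produce_solution_results(produce_req)
    #   client.end_search_solutions(search_id)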
| python |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import MySQLdb
import requests
from lxml import etree,html
import re
from datetime import date,datetime
from time import sleep, time
import simplejson
import concurrent.futures
from concurrent.futures import ProcessPoolExecutor, as_completed
from tqdm import tqdm
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
options = Options()
options.add_argument('--headless')
options.add_argument('--remote-debugging-port=9222')
options.add_argument('--no-sandbox')
options.add_argument('user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36')
#from selenium import webdriver
#from webdriver_manager.chrome import ChromeDriverManager
driver = webdriver.Chrome('/usr/bin/chromedriver', chrome_options=options)
#s = requests
log_file = open("logs.txt","a")
db = MySQLdb.connect("----localhost----","----user----","-----PASSWORD-----","scrap")
#print(db)
cursor = db.cursor()
def logWrite(text):
log_file.write("["+str(date.today().strftime("%d/%m/%Y")) + "]-" + "Amazon warning" + "-" + text+'\n')
def scrapeAmazon(amazonURL):
amazonASIN = re.findall(r'dp/([^/]+)',amazonURL)
try:
driver.get(amazonURL)
responseAmazon = driver.page_source
except:
responseAmazon = ""
successCode = 0
log_file.write("selenium crashed"+"\n")
successCode = 0
amazonStock = 0
amazonPrice = 0
amazon3rdStock = 0
amazon3rdPrice = 0
#print(responseAmazon.text)
try:
tree = html.fromstring(responseAmazon)
mainTree = tree.xpath("//div[@id='histories']")
try:
amazonTitle = tree.xpath("//span[@id='productTitle']/text()")
#print(amazonTitle)
amazonTitle = amazonTitle[0].replace("\n","").replace(" ","")
except:
amazonTitle = ""
try:
amazonImg = tree.xpath("//img[contains(@src,'images-amazon.com/images/I/')]/@src")
#for im in amazonImg:
#print(im)
temp = re.findall(r'images/I/([^.]+)',amazonImg[0])
amazonImg = "https://images-na.ssl-images-amazon.com/images/I/"+temp[0]+".jpg"
#print(amazonImg)
except:
amazonImg = ""
try:
amazonRating = tree.xpath("//*[@id='acrPopover']/@title")
amazonRating = amazonRating[0].replace(" out of 5 stars","")
except:
amazonRating = ""
sellerInfo = tree.xpath("//*[@id='merchant-info' and (contains(.,'amazon') or contains(.,'Amazon'))]/text()")
availability = tree.xpath("//*[@id='availability']//text()")
price = tree.xpath("//*[@id='priceblock_ourprice']/text()")
if(price == []):
amazonPrice = ""
amazon3rdPrice = ""
availCode = 0
for avail in availability:
if('in stock.' in avail.lower() or 'available now.' in avail.lower()):
availCode = 1
if('out of stock.' in avail.lower()):
availCode = 0
break
if(len(sellerInfo) > 0):
if(availCode == 1):
amazonStock = 1
else:
amazonStock = 0
try:
amazonPrice = price[0].replace("\n","").replace("$","").replace(",","").replace(" ","")
except:
amazonPrice = ""
amazon3rdStock = ""
amazon3rdPrice = ""
else:
if(availCode == 1):
amazon3rdStock = 1
else:
amazon3rdStock = 0
try:
amazon3rdPrice = price[0].replace("\n","").replace("$","").replace(",","").replace(" ","")
except:
amazon3rdPrice = ""
amazonStock = ""
amazonPrice = ""
successCode = 1
except Exception as e:
#print(e)
amazonTitle = ""
amazonStock = 0
amazon3rdStock = 0
amazonPrice = 0
amazon3rdPrice = 0
amazonImg = ""
amazonRating = ""
log_file.write(str(e)+"~"+amazonURL+'\n')
successCode = 0
temp_dict = {'success':successCode,'source':'Amazon','url':amazonURL,'imgUrl':amazonImg,'title':amazonTitle,'stock1':amazonStock,'stock2':amazon3rdStock,'price1':amazonPrice,'price2':amazon3rdPrice,'rating':amazonRating}
return(temp_dict)
#return 0
def scrapeBB(bestBuyURL):
bestBuyHeader = {
'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'accept-encoding':'gzip, deflate, br',
'accept-language':'en-US,en;q=0.9',
'cache-control':'no-cache',
#'cookie':'UID=b2803c74-cd2d-442b-92a3-a1305b08790d; bm_sz=AFBD559F2D33F330B59D9F2795D58B79~YAAQXNs4fXfA0sdxAQAAlB0J5wdyXIq6BMRaa+/vA56paoU501tc/5VUeyAzUQUcx/X6su1aArRS4b26p0slRoARRt9vOs+3ZsatbYgLVhnq16Z93418SNzl6fVe+TGeLVSGRCs2SsD67rvUZyw0pd6W0OqErRyHRqQUpXZU/HCzkwKJ0QX0oDOasw48SuAS6Q==; bby_rdp=l; CTT=5a1db5cf92e0a778939891f37f27093c; SID=1edb586b-9933-40e5-927d-9f33bb3054d4; bby_cbc_lb=p-browse-e; _abck=CAE87DBEEB132521FBC9D6D0145CD8C3~0~YAAQXNs4fXrA0sdxAQAA4CAJ5wMAohF1u4WRkOzNVvTScfKt68/+OMYWbqRZBGtDKqcXVI/uOey9cp+k7t+eJW0yK5FxbHxaSEyPTlk+7LYLbSWC92mTo+XcVe0MR5905OgmNoEKSe8KcEYmQDnlIPvDPiuLRleqs+joPBg98OyS41jeeZsjOYWrlbKaAeRsmyGxaROgipBYg0GPCQBE7XqnQAw7w7C9uwgAH8SpQGUdeatXFTCi3wlZUsLq3WNWIVZLL9sEOCFyvU6GpTaHMU6xOVbVERYwU2EG59zblIuflC5YI58K62sv3VVWHQdmjQO8AugdoIo=~-1~-1~-1; AMCVS_F6301253512D2BDB0A490D45%40AdobeOrg=1; s_cc=true; vt=e82c7904-8f22-11ea-9279-06174cc609d2; intl_splash=false; ltc=%20; oid=468879744; optimizelyEndUserId=oeu1588719087091r0.16560520147911806; COM_TEST_FIX=2020-05-05T22%3A51%3A28.154Z; bby_prc_lb=p-prc-w; basketTimestamp=1588719091396; c2=Computers%20%26%20Tablets%3A%20PC%20Gaming%3A%20Gaming%20Laptops%3A%20pdp; bby_loc_lb=p-loc-e; locDestZip=96939; locStoreId=852; pst2=852; s_sq=%5B%5BB%5D%5D; bby_shpg_lb=p-shpg-e; sc-location-v2=%7B%22meta%22%3A%7B%22CreatedAt%22%3A%222020-05-05T22%3A52%3A55.662Z%22%2C%22ModifiedAt%22%3A%222020-05-05T22%3A52%3A56.767Z%22%2C%22ExpiresAt%22%3A%222021-05-05T22%3A52%3A56.767Z%22%7D%2C%22value%22%3A%22%7B%5C%22physical%5C%22%3A%7B%5C%22zipCode%5C%22%3A%5C%2296939%5C%22%2C%5C%22source%5C%22%3A%5C%22A%5C%22%2C%5C%22captureTime%5C%22%3A%5C%222020-05-05T22%3A52%3A55.660Z%5C%22%7D%2C%5C%22store%5C%22%3A%7B%5C%22zipCode%5C%22%3A%5C%2296701%5C%22%2C%5C%22storeId%5C%22%3A852%2C%5C%22storeHydratedCaptureTime%5C%22%3A%5C%222020-05-05T22%3A52%3A56.766Z%5C%22%7D%2C%5C%22destination%5C%22%3A%7B%5C%22zipCode%5C%22%3A%5C%2296939%5C%22%7D%7D%22%7D; bby_ispu_lb=p-ispu-e; AMCV_F6301253512D2BDB0A490D45%40AdobeOrg=1585540135%7CMCMID%7C59272685614388970976192452522290225300%7CMCAID%7CNONE%7CMCOPTOUT-1588726378s%7CNONE%7CvVersion%7C4.4.0',
'pragma':'no-cache',
'referer':'https://www.google.com/',
'sec-fetch-dest':'document',
'sec-fetch-mode':'navigate',
'sec-fetch-site':'same-origin',
'sec-fetch-user':'?1',
'upgrade-insecure-requests':'1',
'user-agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) HeadlessChrome/81.0.4044.138 Safari/537.36'
}
    bestBuyURL = bestBuyURL + "&intl=nosplash"  # extra query parameter to skip the international splash page
responseCode = 999
retryCount = 0
while(responseCode != 200):
try:
responseBh = requests.get(bestBuyURL, headers = bestBuyHeader)
responseCode = responseBh.status_code
except:
responseCode = 999
#print(responseCode)
if(responseCode !=200):
sleep(5)
print("Timeout")
retryCount = retryCount + 1
if(retryCount > 5):
responseAmazon = ""
break
#rUrl = responseBh.url
try:
tree = html.fromstring(responseBh.text)
try:
bestBuyTitle = tree.xpath("//div[@itemprop='name']/h1/text()")
bestBuyTitle = bestBuyTitle[0]
except:
bestBuyTitle = ""
try:
#bestBuyStock = tree.xpath("//div[@class='fulfillment-add-to-cart-button']//button[not (@disabled)]")
#x = bestBuyStock[0].xpath("./text()")
bestBuyStock = tree.xpath("//*[contains(@class,'add-to-cart-button') and contains(.,'Add to Cart')]/text()")
#print(testz)
#print(len(testz))
if(len(bestBuyStock)==1):
bestBuyStock = 1
else:
bestBuyStock = 0
except:
bestBuyStock = 0
try:
bestBuyPrice = tree.xpath("//div[@class='priceView-hero-price priceView-customer-price']/span/text()")
bestBuyPrice = bestBuyPrice[0].replace("\n","").replace("$","").replace(",","").replace(" ","")
except:
bestBuyPrice = 0
try:
bestBuyImg = tree.xpath("//img[@class='primary-image']/@src")
bestBuyImg = bestBuyImg[0]
except:
bestBuyImg = ""
try:
bestBuyRating = tree.xpath("//div[@class='user-generated-content-ugc-stats']//p[@class='sr-only']/following-sibling::i/@alt")
bestBuyRating = bestBuyRating[0]
except:
bestBuyRating = ""
successCode = 1
except Exception as e:
#print(e)
bestBuyTitle = ""
bestBuyStock = 0
bestBuyPrice = 0
bestBuyRating = ""
bestBuyImg = ""
log_file.write(str(e)+"~"+bestBuyURL+'\n')
successCode = 0
temp_dict = {'success':successCode,'source':'BestBuy','url':bestBuyURL,'imgUrl':bestBuyImg,'title':bestBuyTitle,'stock1':bestBuyStock,'stock2':"",'price1':bestBuyPrice,'price2':"",'rating':bestBuyRating}
return(temp_dict)
def scrapeBH(bhURL):
bhHeader = {
'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
#'accept-encoding':'gzip, deflate, br',
'accept-language':'en-US,en;q=0.9',
'cache-control':'no-cache',
#cookie':'__cfduid=d4a6d3283c2256d147ae066627468848e1588553795; sessionKey=722c6da5-0f7b-4d83-b264-792ba95aa0d9; mapp=0; aperture-be-commit-id=n/a; uui=800.606.6969%20/%20212.444.6615|; locale=en; cartId=17786720147; cookieID=198264983731588553796083; dcid=1588553796065-83884464; D_IID=6A6A237C-2C24-3531-B886-FF91CB0D1DCB; D_UID=AF85F5DD-FA20-3C65-90BB-6399F4146D65; D_ZID=A5517FFD-FE03-30AA-A268-B9AAA5162C71; D_ZUID=63C70FD0-3CA5-351B-B950-8D005C05C863; D_HID=A02D8135-4804-3C53-AFBA-A5C3ED8FE638; D_SID=139.194.87.54:77Dt4QW/GW0BIvlbEufTsdS6BNjWkOgAf+rWtNKiSnk; ftr_ncd=6; aperture-be-commit-id=n/a; ab_nglp=A; TS0188dba5=01ec39615fd4e6c164cb3159b9ac2d75b633ca2331495cbc7ab9e229efceb25a992b371e554de125ea74f003b17a68534e94194cae31236344a68c482e220a36279ce8d4ee355e3963c5e3e93b2b67fed318a1aa5f42dc44904dc7324f09dd396d15ec9089; build=20200507v10-20200507v10; utkn=1bbf01004e07068c37d5fa0e67188dc; dpi=cat=2,cur=USD,app=D,lang=E,view=L,lgdin=N,cache=release-WEB-20200507v10-BHJ-DVB23488-13; lpi=cat=2,cur=USD,app=D,lang=E,view=L,lgdin=N,cache=release-WEB-20200507v10-BHJ-DVB23488-13,ipp=24,view=L,sort=BS,priv=N,state=; __cfruid=461a56b0bb682555c8935ee08eb2cf22a765375b-1588884414; SSID_C=CACk3B0OAAAAAABEaK9eGezDIERor14CAAAAAAAAAAAAvnO0XgANyCrRAAMUSR0AvnO0XgEA; SSSC_C=333.G6822786602306366489.2|53546.1919252; my_cookie=!L/xuWaRpTsZlPFEedo8zpbpxrFtb+i/IVM39DJumiDl23w0+9o8F9HFN7yvUXyeksJmE1ejqPAv6Jod8fkW9gZaT18xJyw0zNkmvYK8Eu/P1Rd3J27pCntf3yEw2yJ2EdIETB0CMeGRubi+EUCpp7jBloW5PHqIp8oiYWMB0xVZgBmZLAJ2K+oS6UNybkc7Qka0WSKmFDg==; JSESSIONID=KTbw5B7vL1GXDQixk1QMTu2JvmbkyJGb!-7551078; __cf_bm=706019855188078126d56423ab18434c342d90fb-1588884414-1800-ATg60f+q70XvV327X4lOFqdIBRibyvAsvT3va3yPmMShSe6n4o3y1pLZ2dQkdW8WkV3RJrSf8IB+cv8beGkNQlc=; ssTest=53546%3A1919252; TopBarCart=0|0; TS01d628c4=01ec39615faba9a1927162573f9e05ace9147f8549e8e82234d892065a97e2f331487a0fdb675529047cb678333ace446f05993ea96d5e24e8a375e19af0c4cf0cd904bb586b4f2fe1deb60c48bec3d183582dced9dd8f0d5619634e2cc7695fb6b612c7cba4493f44cab247f4dc50d1e9165f35d41fcec2c674ebf62e7cc7010c3fc3df27d7aa4b1af4771ea689484bc5d1ba897366683dbfa70298e74f71235719331a4272d57eb658bb805ac11acdcdab8d53d4cb94f46dcea09b3769b2e1718b6e20cb246d89b00804a3d1e3e003829c1b4ab9629783185bf48a78b32080964e045027; ft_ld_1h=1588884457548; forterToken=dd943fcb5daf4f0492988a2e607a0e76_1588884468080__UDF43_6; SSRT_C=-nO0XgIDAA; SSPV_C=yO0AAAAAAAQADgAAAAAAAAAAAAQAAAAAAAAAAAAA; TS01e1f1fd=01ec39615fb0510e07842cf22d1fc2197f996d7290e8e82234d892065a97e2f331487a0fdb7c012ddb25228ff35adcbd0f5fe04523a7f7074d094aeea8873b7cfd2fc095d83b2097d848e2cee7ad22b758e8bdd418505dac145f231499a37a66c0c82ba1aeba2fb9be085e4c254bb3d0007fffd17d2a124dd672f00cfb02d5fc73af0174453b25dbd8e03b20ae8a28779146996003; app_cookie=1588884475; TS0188dba5_77=080f850069ab2800d215d26fb2f201a727582ae42955cf5c5cdaed72852676970991336af136d4148ed5d0adbbb84b17084ebc34e1824000fd9af2033a1feb910be156ad3566703f6632819d210506351addc2f3374017b47a172186e497ef32163f0a48a5617ece04de8dd2413e24383ac5181c7a09c355
'pragma':'no-cache',
'referer':'https://www.bhphotovideo.com/',
'sec-fetch-dest':'document',
'sec-fetch-mode':'navigate',
'sec-fetch-site':'same-origin',
'sec-fetch-user':'?1',
'upgrade-insecure-requests':'1',
'user-agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) HeadlessChrome/81.0.4044.138 Safari/537.36'
}
#responseBh = requests.get(bhURL, headers = bhHeader, timeout=None)
#print(responseBh.text)
#return 0
responseCode = 999
retryCount = 0
while(responseCode != 200):
try:
responseBh = requests.get(bhURL, headers = bhHeader, timeout=None)
responseCode = responseBh.status_code
#print(responseCode)
except:
responseCode = 999
if(responseCode !=200):
sleep(5)
print("Timeout")
retryCount = retryCount + 1
if(retryCount > 5):
responseAmazon = ""
break
#rUrl = responseBh.url
try:
tree = html.fromstring(responseBh.text)
try:
bhTitle = tree.xpath("//*[@data-selenium='productTitle']/text()")
bhTitle = bhTitle[0]
except:
bhTitle = ""
try:
bhStock = tree.xpath("//span[@data-selenium='stockStatus']/text()")
if(bhStock[0]=="In Stock"):
bhStock = 1
else:
bhStock = 0
except:
bhStock = 0
try:
bhPrice = tree.xpath("//*[@data-selenium='pricingPrice']/text()")
bhPrice = bhPrice[0].replace("\n","").replace("$","").replace(",","").replace(" ","")
except:
bhPrice = 0
try:
bhImg = tree.xpath("//img[@data-selenium='inlineMediaMainImage']/@src")
bhImg = bhImg[0]
except:
bhImg = ""
try:
bhRating1 = tree.xpath("//div[@data-selenium='ratingContainer']/svg")
bhRating2 = tree.xpath("//div[@data-selenium='ratingContainer']/svg[contains(@class,'full')]")
bhRating = len(bhRating2) + (len(bhRating1)-len(bhRating2))*0.5
#print(bhRating)
except:
bhRating = ""
successCode = 1
except Exception as e:
#print(e)
bhTitle = ""
bhStock = 0
bhPrice = 0
bhRating = ""
bhImg = ""
log_file.write(str(e)+"~"+bhURL+'\n')
successCode = 0
temp_dict = {'success':successCode,'source':'B&H','url':bhURL,'imgUrl':bhImg,'title':bhTitle,'stock1':bhStock,'stock2':"",'price1':bhPrice,'price2':"",'rating':bhRating}
return(temp_dict)
def runAll(dbCode,source,links):
if(source=="Bestbuy"):
temp = scrapeBB(links)
if(source=="Amazon"):
temp = scrapeAmazon(links)
if(source=="B&H"):
temp = scrapeBH(links)
print(temp)
if("amazon" in links):
keycode = re.findall(r'dp/([^/]+)',links)
keycode = keycode[0]
#print(keycode)
else:
keycode = links.replace("https://","").replace("http://","").replace("www.bhphotovideo.com/c/product","").replace("www.amazon.com","").replace("www.bestbuy.com/site","").replace("/","").strip()
keycode = keycode[0:11]
print(keycode)
dateTimeNow = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
#print(keycode)
if(temp['stock1']==''):
newStock = temp['stock2']
print("ini stock 2")
else:
newStock = temp['stock1']
oldStock = 0
inputCode = 0
try:
cursor.execute('''SELECT * FROM scrap_data WHERE keycode = %s''',[keycode])
db.commit()
urlData = cursor.fetchall()
if(urlData == []):
inputCode = 1
else:
for dat in urlData:
if(dat[6]==""):
oldStock = dat[7]
if(dat[7]==""):
oldStock = dat[6]
#print(dat)
inputCode = 0
except Exception as e:
print(str(e))
print(oldStock)
print(newStock)
    # If the stock value changed, update STOCK_CHANGE_DATE
    if(int(oldStock)==int(newStock)):
        print("stock unchanged")
    else:
        print("stock changed")
if(int(newStock) != int(oldStock) and inputCode==0):
print("a")
try:
cursor.execute('''
INSERT INTO scrap_data
(identifier ,keycode,url, source, img_url, title, stock_1, stock_2, price_1, price_2, rating, stock_change_date, refreshed_date)
VALUES
(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE
identifier = VALUES(identifier),
url = VALUES(url),
source = VALUES(source),
img_url = VALUES(img_url),
title = VALUES(title),
stock_1 = VALUES(stock_1),
stock_2 = VALUES(stock_2),
price_1 = VALUES(price_1),
price_2 = VALUES(price_2),
rating = VALUES(rating),
stock_change_date = VALUES(stock_change_date),
refreshed_date = VALUES(refreshed_date);
''',
(dbCode,keycode,temp['url'],temp['source'],temp['imgUrl'],temp['title'],temp['stock1'],temp['stock2'],temp['price1'],temp['price2'],temp['rating'],dateTimeNow,dateTimeNow) # python variables
)
db.commit()
except Exception as e:
print(str(e))
else:
print("b")
try:
cursor.execute('''
INSERT INTO scrap_data
(identifier ,keycode,url, source, img_url, title, stock_1, stock_2, price_1, price_2, rating, stock_change_date, refreshed_date)
VALUES
(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE
identifier = VALUES(identifier),
url = VALUES(url),
source = VALUES(source),
img_url = VALUES(img_url),
title = VALUES(title),
stock_1 = VALUES(stock_1),
stock_2 = VALUES(stock_2),
price_1 = VALUES(price_1),
price_2 = VALUES(price_2),
rating = VALUES(rating),
#stock_change_date = VALUES(stock_change_date),
refreshed_date = VALUES(refreshed_date);
''',
(dbCode,keycode,temp['url'],temp['source'],temp['imgUrl'],temp['title'],temp['stock1'],temp['stock2'],temp['price1'],temp['price2'],temp['rating'],dateTimeNow,dateTimeNow) # python variables
)
db.commit()
except Exception as e:
print(str(e))
"""
def scrapeAdorama(adoramaURL):
adoramaHeader={
'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'accept-encoding':'gzip, deflate, br',
'accept-language':'en-US,en;q=0.9',
'cache-control':'no-cache',
'cookie':'akCountry=ID; js_sid=1; sid3=820d6794-6e6f-4d45-8aec-01e17012da4f; lastPrtcl=https%3A; PUID=820d6794-6e6f-4d45-8aec-01e17012da4f; Adorama_ABTestingFlag=389; _pxvid=4a4776af-8da0-11ea-a851-0242ac120008; sr_browser_id=321ff0ed-bfbe-455f-bdef-3e678bc6129f; needlepin=1588553019384; SSID=CADmKx0OAAAAAAA0Za9ezVECDTRlr14CAAAAAAAAAAAA1NmxXgDo-GDJAAFnrhsA1NmxXgEA; SSSC=500.G6822783234720551373.2|51552.1814119; IsLoggedIn=False; adivparam=adnh-f_isVip-f_isLoggedIn-f; VipCustomer=F; isVip360=F; ShowMap=0|0|0|0; PHPSESSID=4hrm78jms9ftmu20ffg8t62bg1; HumanSrch=Sigma%20100-400%20C; SSOD=AB5dAAAAEACZMg4AAgAAACfbsV6h27FeAAA; SSRT=4t-xXgADAA; sailthru_pageviews=1; sailthru_content=ce31079a76f661fd351c8525b4ceb460; sailthru_visitor=64b6a436-72d9-4a19-bb40-38193515cf63; a=b; InvisibleParameter=priceMode%3D0%7C0%7C1%7C0%7C1%26pagePriceMode%3D0%7C0%7C0%7C0%7C0%26country%3DID%26productVersion%3D1420%26perPage%3D15%26sort%3D%26descSort%3D%26isVip%3Dfalse%26isSRLoggedIn%3Dfalse%26isVip360%3Dfalse%26isLoggedIn%3Dfalse%26mode%3D%26isFreeShipPromo%3Dfalse%26clientUtcOffset%3D7%26isEduPlus%3Dfalse%26bankId%3D1; activeUser=1; _px2=eyJ1IjoiYTg2MTZiZTAtOGYxYS0xMWVhLWIyYzEtOGYzYTdhYWYzZTQxIiwidiI6IjRhNDc3NmFmLThkYTAtMTFlYS1hODUxLTAyNDJhYzEyMDAwOCIsInQiOjE1ODg3MTU2NDU4MTIsImgiOiI2OGIzYmU4MjhiM2M5NTM1MjY4NDA5Zjk3NTMxYTU4NjQzMzJiYzk1ODkyMjc2ZTIwMjRiMTUzNmFmNzM3N2Q4In0=',
'pragma':'no-cache',
'sec-fetch-dest':'document',
'sec-fetch-mode':'navigate',
'sec-fetch-site':'none',
'sec-fetch-user':'?1',
'upgrade-insecure-requests':'1',
'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36 OPR/67.0.3575.130',
}
#r = requests.get(adoramaURL)
#return(r.text)
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get("https://www.adorama.com/msigs66037.html")
form_element = driver.find_element_by_xpath("//h1/span").text
print(form_element)
return 0
"""
# shipping and review scraping not implemented yet
#print(sample_input['B&H'])
#test = scrapeAmazon("https://www.amazon.com/dp/B084B7GGNW")
#test = scrapeAmazon("https://www.amazon.com/dp/B07WTRXP7Y/ref=pd_vtpd_63_2/131-7347869-8361422?_encoding=UTF8&pd_rd_i=B07WTRXP7Y&pd_rd_r=ff2391ad-bbec-4713-a1eb-f94ff1fb400b&pd_rd_w=YO1JP&pd_rd_wg=WUPK4&pf_rd_p=be9253e2-366d-447b-83fa-e044efea8928&pf_rd_r=D755CGKGZ5RPQN8P76KE&psc=1&refRID=D755CGKGZ5RPQN8P76KE")
#print(test)
#test = scrapeAdorama("https://www.adorama.com/msigs66037.html") ## SKIPPED DUE TO ANTISCRIPT
#test = scrapeBB("https://www.bestbuy.com/site/msi-gs66-10sfs-15-6-laptop-intel-core-i7-32gb-memory-nvidia-geforce-rtx-2070-super-512gb-ssd-black-core/6408527.p?skuId=6408527")
#print(test)
#test = scrapeBH("https://www.bhphotovideo.com/c/product/1551636-REG/msi_gs66_stealth_10sfs_037_gs66_stealth_i7_10750h_rtx2070.html")
#print(test)
#print(db)
cursor.execute("""SELECT * FROM scrap_url""")
db.commit()
urlData = cursor.fetchall()
for dat in urlData:
urlList = []
dbCode = dat[0]
temp = dat[1].strip()
urlJson = simplejson.loads(temp)
urlList.append([dbCode,"Bestbuy",urlJson['Bestbuy']])
urlList.append([dbCode,"Amazon",urlJson['Amazon']])
urlList.append([dbCode,"B&H",urlJson['B&H']])
#print(urlList)
try:
with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
futures = [executor.submit(runAll, url[0], url[1], url[2]) for url in urlList]
kwargs = {
'total': len(futures),
'unit': 'Pages',
'unit_scale': True,
'leave': True
}
for x in tqdm(as_completed(futures), **kwargs):
pass
except Exception as e:
print("Keywords error?")
        log_file.write(str(e)+"\n")
sleep(3)
#break
#print(test)
#print(test['Bestbuy'])
cursor.close()
db.close()
driver.quit()
| python |
import numpy as np
from math import radians
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def wind_rose(df, wd, nbins=16, xticks=8, plot=111, wind=True, ylim=False, yaxis=False, yticks=False):
"""
Return a wind rose.
Parameters
----------
df : DataFrame
The pandas DataFrame holding the data.
wd : str
Wind direction column name.
nbins : int, optional
Number of bins to plot, default is 16.
xticks : int {4, 8, 16} , optional
Number of xticks, default is 8.
plot : int, optional
nrows, ncols, index to define subplots, default is 111,
it is used to plot seasonal wind roses.
wind : bool, optional
        Show cardinal directions (i.e. ['N', 'NE', ...]), default is True.
ylim : int or float, optional
Maximum limit for y-axis, default is False.
    yaxis : int or float, optional
Position of y-axis (0 - 360), default is False.
yticks : list-like, optional
List of yticks, default is False.
"""
    labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
              'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
# adjust wind direction (align North)
x = 360 - (180 / nbins)
w_dir = np.zeros(len(df[wd]))
for i in range(len(df[wd])):
if x <= df[wd][i] <= 360:
w_dir[i] = df[wd][i] - 360
else:
w_dir[i] = df[wd][i]
df['dir'] = w_dir
# histogram
bins = np.arange(- (180 / nbins), 360 + (180 / nbins), 360 / nbins)
n, bins = np.histogram(df.dir, bins=bins)
# wind rose
ax = plt.subplot(plot, projection='polar')
ax.bar([radians(x + (180 / nbins)) for x in bins][:-1],
n,
width=2 * np.pi / nbins)
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
plt.grid(axis='both', which='major', linestyle='--')
# categorical xticklabels
if xticks == 4:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels([x for x in labels[::4]])
elif xticks == 8:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels([x for x in labels[::2]])
elif xticks == 16:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels(labels)
else:
raise Exception("xticks should be 4, 8, or 16")
# y axis limit
if ylim:
plt.ylim(0, ylim)
# y axis position
if yaxis:
ax.set_rlabel_position(yaxis)
# y axis ticks
if yticks:
ax.set_yticks(yticks)
return
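# Example call (illustrative): for a DataFrame `met` whose 'wd' column holds wind
# directions in degrees, a basic 16-bin rose is drawn with
#
#   wind_rose(met, 'wd', nbins=16, xticks=8, yaxis=45)
#   plt.show()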
def wind_rose_season(df, wd, nbins=16, xticks=8, wind=True, south=True, ylim=False, yaxis=False, yticks=False):
"""
Return a wind rose for each season.
Parameters
----------
df : DataFrame
The pandas DataFrame holding the data.
wd : str
Wind direction column name.
nbins : int, optional
Number of bins to plot, default is 16.
xticks : int {4, 8, 16} , optional
Number of xticks, default is 8.
wind : bool, optional
        Show cardinal directions (i.e. ['N', 'NE', ...]), default is True.
    south : bool, optional, default is True
        If True, seasons are computed for the Southern Hemisphere, otherwise for the Northern Hemisphere.
    ylim : int or float, optional
        Maximum limit for y-axis, default is False.
    yaxis : int or float, optional
Position of y-axis (0 - 360), default is False.
yticks : list-like, optional
List of yticks, default is False.
"""
# create a new column season
if south:
df['season'] = ((df.index.month % 12 + 3) // 3).map({1: 'Summer', 2: 'Autumn', 3: 'Winter', 4: 'Spring'})
else:
df['season'] = ((df.index.month % 12 + 3) // 3).map({1: 'Winter', 2: 'Spring', 3: 'Summer', 4: 'Autumn'})
# windroses
for i, season in enumerate(df['season'].unique()):
df_season = df.copy()
df_season = df_season.loc[df_season['season'] == season]
wind_rose(df_season, wd, nbins=nbins, xticks=xticks, wind=wind, plot=221+i,
ylim=ylim, yaxis=yaxis, yticks=yticks)
plt.title(season + '\n', fontsize=14, fontweight='bold')
plt.tight_layout()
return
def wind_rose_scatter(df, ws, wd, xticks=8, size=2, plot=111, wind=True, ylim=False, yaxis=False, yticks=False):
"""
Return a scatter polar plot of wind speed.
Parameters
----------
df : DataFrame
The pandas DataFrame holding the data.
ws : str
Wind speed column name.
wd : str
Wind direction column name.
xticks : int {4, 8, 16} , optional
Number of xticks, default is 8.
    size : int or float, default is 2
Size of scatter points.
plot : int, optional
nrows, ncols, index to define subplots, default is 111,
it is used to plot seasonal wind roses.
wind : bool, optional
        Show cardinal directions (i.e. ['N', 'NE', ...]), default is True.
    ylim : int or float, optional
        Maximum limit for y-axis, default is False.
    yaxis : int or float, optional
Position of y-axis (0 - 360), default is False.
yticks : list-like, optional
List of yticks, default is False.
"""
    labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
              'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
# windrose
ax = plt.subplot(plot, projection='polar')
ax.scatter(x=[radians(x) for x in df[wd].values], y=df[ws].values, s=size)
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
plt.grid(axis='both', which='major', linestyle='--')
# categorical xticklabels
if xticks == 4:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels([x for x in labels[::4]])
elif xticks == 8:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels([x for x in labels[::2]])
elif xticks == 16:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels(labels)
else:
raise Exception("xticks should be 4, 8, or 16")
# y axis limit
if ylim:
plt.ylim(0, ylim)
# y axis position
if yaxis:
ax.set_rlabel_position(yaxis)
# y axis ticks
if yticks:
ax.set_yticks(yticks)
return
def wind_rose_scatter_season(df, ws, wd, xticks=8, size=2, wind=True, south=True,
ylim=False, yaxis=False, yticks=False):
"""
Return a scatter polar plot of wind speed.
Parameters
----------
df : DataFrame
The pandas DataFrame holding the data.
ws : str
Wind speed column name.
wd : str
Wind direction column name.
xticks : int {4, 8, 16} , optional
Number of xticks, default is 8.
    size : int or float, default is 2
        Size of scatter points.
    wind : bool, optional
        Show cardinal directions (i.e. ['N', 'NE', ...]), default is True.
    south : bool, optional, default is True
        If True, seasons are computed for the Southern Hemisphere, otherwise for the Northern Hemisphere.
    ylim : int or float, optional
        Maximum limit for y-axis, default is False.
    yaxis : int or float, optional
Position of y-axis (0 - 360), default is False.
yticks : list-like, optional
List of yticks, default is False.
"""
# create a new column season
if south:
df['season'] = ((df.index.month % 12 + 3) // 3).map({1: 'Summer', 2: 'Autumn', 3: 'Winter', 4: 'Spring'})
else:
df['season'] = ((df.index.month % 12 + 3) // 3).map({1: 'Winter', 2: 'Spring', 3: 'Summer', 4: 'Autumn'})
# windroses
for i, season in enumerate(df['season'].unique()):
df_season = df.copy()
df_season = df_season.loc[df_season['season'] == season]
wind_rose_scatter(df_season, ws=ws, wd=wd, xticks=xticks, size=size, plot=221+i, wind=wind,
ylim=ylim, yaxis=yaxis, yticks=yticks)
plt.title(season + '\n', fontsize=14, fontweight='bold')
plt.tight_layout()
return
def wind_rose_speed(df, ws, wd, nbins=16, xticks=8, plot=111, wind=True, ylim=False, yaxis=False, yticks=False,
lims=False, loc='lower left'):
"""
Return a wind rose with wind speed ranges.
Parameters
----------
df : DataFrame
The pandas DataFrame holding the data.
ws : str
Wind speed column name.
wd : str
Wind direction column name.
nbins : int, optional
Number of bins to plot, default is 16.
xticks : int {4, 8, 16} , optional
Number of xticks, default is 8.
plot : int, optional
nrows, ncols, index to define subplots, default is 111,
it is used to plot seasonal wind roses.
wind : bool, optional
        Show cardinal directions (i.e. ['N', 'NE', ...]), default is True.
    ylim : int or float, optional
        Maximum limit for y-axis, default is False.
    yaxis : int or float, optional
Position of y-axis (0 - 360), default is False.
yticks : list-like, optional
List of yticks, default is False.
lims : list-like, optional, default is False.
Wind speed ranges.
loc : int or str, optional, default is 'lower left'
Legend location.
"""
    labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
              'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
# adjust wind direction (align North)
x = 360 - (180 / nbins)
w_dir = np.zeros(len(df[wd]))
for i in range(len(df[wd])):
if x <= df[wd][i] <= 360:
w_dir[i] = df[wd][i] - 360
else:
w_dir[i] = df[wd][i]
df['dir'] = w_dir
# bins
bins = np.arange(- (180 / nbins), 360 + (180 / nbins), 360 / nbins)
# default wind speed limits
if not lims:
lims = np.linspace(df[ws].min(), df[ws].max(), num=5, endpoint=False)
        lims = np.append(lims, df[ws].max())
# matrix to store n values for all ranges
ns = np.zeros((len(bins) - 1, len(lims) - 1))
# histogram
for i in range(len(lims) - 1):
ds = df.copy()
        if i == len(lims) - 2:
            ds = ds[(ds[ws] >= lims[i]) & (ds[ws] <= lims[i + 1])]
        else:
            ds = ds[(ds[ws] >= lims[i]) & (ds[ws] < lims[i + 1])]
n, bins = np.histogram(ds.dir, bins=bins)
ns[:, i] = n
if np.sum(ns) != df.dir.count():
print("Warning: wind speed range does not cover all data")
# windrose
ax = plt.subplot(plot, projection='polar')
for i in range(len(lims) - 1):
ax.bar([radians(x + (180 / nbins)) for x in bins][:-1],
np.sum(ns[:, 0:len(lims) - 1 - i], axis=1),
width=2 * np.pi / nbins,
label="{:.1f}".format(lims[len(lims) - 1 - i - 1]) + ' - ' +
"{:.1f}".format(lims[len(lims) - 1 - i]))
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
plt.grid(axis='both', which='major', linestyle='--')
# categorical xticklabels
if xticks == 4:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels([x for x in labels[::4]])
elif xticks == 8:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels([x for x in labels[::2]])
elif xticks == 16:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels(labels)
else:
raise Exception("xticks should be 4, 8, or 16")
# y axis limit
if ylim:
plt.ylim(0, ylim)
# y axis position
if yaxis:
ax.set_rlabel_position(yaxis)
# y axis ticks
if yticks:
ax.set_yticks(yticks)
# legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(reversed(handles), reversed(labels), loc=loc)
return
def wind_rose_speed_season(df, ws, wd, nbins=16, xticks=8, wind=True, south=True, ylim=False, yaxis=False, yticks=False,
lims=False, loc='lower left'):
"""
Return a wind rose with wind speed ranges for each season.
Parameters
----------
df : DataFrame
The pandas DataFrame holding the data.
ws : str
Wind speed column name.
wd : str
Wind direction column name.
nbins : int, optional
Number of bins to plot, default is 16.
xticks : int {4, 8, 16} , optional
Number of xticks, default is 8.
wind : bool, optional
        Show cardinal directions (i.e. ['N', 'NE', ...]), default is True.
    south : bool, optional, default is True
        If True, seasons are computed for the Southern Hemisphere, otherwise for the Northern Hemisphere.
    ylim : int or float, optional
        Maximum limit for y-axis, default is False.
    yaxis : int or float, optional
Position of y-axis (0 - 360), default is False.
yticks : list-like, optional
List of yticks, default is False.
lims : list-like, optional, default is False.
Wind speed ranges.
loc : int or str, optional, default is 'lower left'
Legend location.
"""
# create a new column season
if south:
df['season'] = ((df.index.month % 12 + 3) // 3).map({1: 'Summer', 2: 'Autumn', 3: 'Winter', 4: 'Spring'})
else:
df['season'] = ((df.index.month % 12 + 3) // 3).map({1: 'Winter', 2: 'Spring', 3: 'Summer', 4: 'Autumn'})
# windroses
for i, season in enumerate(df['season'].unique()):
df_season = df.copy()
df_season = df_season.loc[df_season['season'] == season]
wind_rose_speed(df_season, ws, wd, nbins=nbins, xticks=xticks, wind=wind, plot=221+i,
ylim=ylim, yaxis=yaxis, yticks=yticks, lims=lims, loc=loc)
plt.title(season + '\n', fontsize=14, fontweight='bold')
plt.tight_layout()
return
def wind_rose_pollution(df, var, ws, wd, var_label, cmap='viridis', nbins=16, min_bin=1, contrib=False,
xticks=8, plot=111, z_values=None, wind=True, yaxis=False, lims=False):
"""
Return a wind rose for pollutant concentration.
Parameters
----------
df : DataFrame
The pandas DataFrame holding the data.
var : str
Pollutant column name.
ws : str
Wind speed column name.
wd : str
Wind direction column name.
var_label : str
Pollutant label.
cmap : str or Colormap
A Colormap instance or registered colormap name, default is 'viridis'.
nbins : int, optional
Number of bins to plot, default is 16.
min_bin : int, optional
The minimum number of points allowed in a wind speed/wind
direction bin, default is 1.
contrib : bool, optional
If true, return the percentage of contribution of each segment,
concentration of the pollutant weighted by wind speed/direction,
default is False.
xticks : int {4, 8, 16} , optional
Number of xticks, default is 8.
plot : int, optional
nrows, ncols, index to define subplots, default is 111,
it is used to plot seasonal wind roses.
z_values : list-like, optional, default is None
Min and max values for z values (colorbar).
wind : bool, optional
        Show cardinal directions (i.e. ['N', 'NE', ...]), default is True.
    yaxis : int or float, optional
Position of y-axis (0 - 360), default is False.
lims : list-like, optional, default is False.
Wind speed ranges.
"""
    labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
              'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
# adjust wind direction (align North)
x = 360 - (180 / nbins)
w_dir = np.zeros(len(df[wd]))
for i in range(len(df[wd])):
if x <= df[wd][i] <= 360:
w_dir[i] = df[wd][i] - 360
else:
w_dir[i] = df[wd][i]
df['dir'] = w_dir
# bins
bins = np.arange(- (180 / nbins), 360 + (180 / nbins), 360 / nbins)
# default wind speed limits
if not lims:
lims = np.linspace(df[ws].min(), df[ws].max(), num=5, endpoint=False)
        lims = np.append(lims, df[ws].max())
# matrix to store concentration values for all ranges
ns = np.zeros((len(lims) - 1, len(bins) - 1))
# histogram
# wind speed ranges
for i in range(len(lims) - 1):
ds = df.copy()
if i == len(lims) - 2:
ds = ds[(ds[ws] >= lims[i]) & (ds[ws] <= lims[i + 1])]
else:
ds = ds[(ds[ws] >= lims[i]) & (ds[ws] < lims[i + 1])]
# wind direction bins
for j in range(len(bins) - 1):
ds = ds[(ds['dir'] >= bins[j]) & (ds['dir'] < bins[j + 1])]
if ds[var].count() >= min_bin:
if contrib and min_bin == 1:
weight = ds[var].mean() / df[(df[ws] > 0) | (~np.isnan(df[wd]))][var].mean()
ns[i, j] = 100 * weight * ds[var].count() / df[(df[ws] > 0) | (~np.isnan(df[wd]))][var].count()
elif contrib and min_bin > 1:
raise Exception("to use contrib option, min_bin must be 1")
else:
ns[i, j] = ds[var].mean()
else:
ns[i, j] = np.nan
ds = df.copy()
if i == len(lims) - 2:
ds = ds[(ds[ws] >= lims[i]) & (ds[ws] <= lims[i + 1])]
else:
ds = ds[(ds[ws] >= lims[i]) & (ds[ws] < lims[i + 1])]
# windrose
ax = plt.subplot(plot, projection='polar')
if z_values:
cf = ax.pcolormesh(np.radians(bins),
lims, ns,
shading='flat', zorder=0,
vmin=z_values[0],
vmax=z_values[1],
cmap=cmap)
else:
cf = ax.pcolormesh(np.radians(bins),
lims, ns,
shading='flat', zorder=0,
cmap=cmap)
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
if contrib:
tick_format = '%.0f%%'
else:
tick_format = None
cbar = plt.colorbar(cf, ax=ax, pad=0.1, shrink=0.75, format=tick_format)
cbar.set_label(var_label)
ax.set_yticks(lims)
bbox = dict(boxstyle="round", ec=None, fc="white", alpha=0.5)
plt.setp(ax.get_yticklabels(), bbox=bbox)
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
ax.set_xticks(np.radians(np.arange((180 / nbins), 360 + (180 / nbins), 360 / nbins)), minor=True)
plt.grid(axis='x', which='minor', linestyle='-', linewidth=0.25)
plt.grid(axis='y', which='major', linestyle='-', linewidth=0.55)
# categorical xticklabels
if xticks == 4:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels([x for x in labels[::4]])
elif xticks == 8:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels([x for x in labels[::2]])
elif xticks == 16:
ax.set_xticks([radians(x) for x in np.arange(0, 360, 360 / xticks)])
if wind:
ax.set_xticklabels(labels)
else:
raise Exception("xticks should be 4, 8, or 16")
# y axis position
if yaxis:
ax.set_rlabel_position(yaxis)
return
def wind_rose_pollution_season(df, var, ws, wd, var_label, cmap='viridis', nbins=16, min_bin=1, contrib=False,
xticks=8, z_values=None, wind=True, south=True, yaxis=False, lims=False):
"""
Return a wind rose for pollutant concentration for each season.
Parameters
----------
df : DataFrame
The pandas DataFrame holding the data.
var : str
Pollutant column name.
ws : str
Wind speed column name.
wd : str
Wind direction column name.
var_label : str
Pollutant label.
cmap : str or Colormap
A Colormap instance or registered colormap name, default is 'viridis'.
nbins : int, optional
Number of bins to plot, default is 16.
min_bin : int, optional
The minimum number of points allowed in a wind speed/wind
direction bin, default is 1.
contrib : bool, optional
If true, return the percentage of contribution of each segment,
concentration of the pollutant weighted by wind speed/direction,
default is False.
xticks : int {4, 8, 16} , optional
Number of xticks, default is 8.
z_values : list-like, optional, default is None
Min and max values for z values (colorbar).
wind : bool, optional
        Show cardinal directions (i.e. ['N', 'NE', ...]), default is True.
    south : bool, optional, default is True
        If True, seasons are computed for the Southern Hemisphere, otherwise for the Northern Hemisphere.
    yaxis : int or float, optional
Position of y-axis (0 - 360), default is False.
lims : list-like, optional, default is False.
Wind speed ranges.
"""
# create a new column season
if south:
df['season'] = ((df.index.month % 12 + 3) // 3).map({1: 'Summer', 2: 'Autumn', 3: 'Winter', 4: 'Spring'})
else:
df['season'] = ((df.index.month % 12 + 3) // 3).map({1: 'Winter', 2: 'Spring', 3: 'Summer', 4: 'Autumn'})
# windroses
for i, season in enumerate(df['season'].unique()):
df_season = df.copy()
df_season = df_season.loc[df_season['season'] == season]
wind_rose_pollution(df_season, var, ws, wd, var_label, cmap=cmap, nbins=nbins, min_bin=min_bin, contrib=contrib,
xticks=xticks, plot=221+i, z_values=z_values, wind=wind, yaxis=yaxis, lims=lims)
plt.title(season + '\n', fontsize=14, fontweight='bold')
plt.tight_layout()
return
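if __name__ == '__main__':
    # Illustrative demo only (assumes pandas is installed): build a synthetic
    # wind record and draw a plain rose and a speed-binned rose.
    import pandas as pd
    np.random.seed(0)
    demo = pd.DataFrame({
        'wd': np.random.uniform(0, 360, 1000),      # wind direction in degrees
        'ws': np.random.weibull(2.0, 1000) * 5.0,   # wind speed in m/s
    })
    wind_rose(demo, 'wd', nbins=16, xticks=8)
    plt.show()
    wind_rose_speed(demo, 'ws', 'wd', nbins=16, xticks=8, loc='lower left')
    plt.show()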
| python |
import pytest
import pystiche_papers.johnson_alahi_li_2016 as paper
@pytest.fixture(scope="package")
def styles():
return (
"composition_vii",
"feathers",
"la_muse",
"mosaic",
"starry_night",
"the_scream",
"udnie",
"the_wave",
)
@pytest.fixture(scope="package")
def vgg_load_weights_mock(package_mocker):
return package_mocker.patch(
"pystiche.enc.models.vgg.VGGMultiLayerEncoder._load_weights"
)
@pytest.fixture(scope="package", autouse=True)
def multi_layer_encoder_mock(package_mocker, vgg_load_weights_mock):
multi_layer_encoder = paper.multi_layer_encoder()
def new(impl_params=None):
multi_layer_encoder.empty_storage()
return multi_layer_encoder
return package_mocker.patch(
"pystiche_papers.johnson_alahi_li_2016._loss._multi_layer_encoder", new,
)
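# Usage sketch (illustrative, not part of the fixtures): a test module in this
# package can simply request the fixtures; the autouse multi_layer_encoder_mock
# keeps the pretrained VGG weights from being loaded during the test run.
#
#   def test_known_styles(styles):
#       assert "starry_night" in styles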
| python |
a = "Paul Sinatra"
print(a.count("a"))
print(a.count("a",0,10))
print(a.endswith("tra"))
print(a.endswith("ul",1,8))
print(a.find("a"))
print(a.find("a",2,10))
print(len(a))
print(a.lower())
print(max(a))
print(min(a))
print(a.replace("a","b"))
print(a.split(" "))
print(a.strip())
print(a.upper()) | python |
"""Token constants."""
# Auto-generated by Tools/scripts/generate_token.py
__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF']
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
LBRACE = 25
RBRACE = 26
EQEQUAL = 27
NOTEQUAL = 28
LESSEQUAL = 29
GREATEREQUAL = 30
TILDE = 31
CIRCUMFLEX = 32
LEFTSHIFT = 33
RIGHTSHIFT = 34
DOUBLESTAR = 35
PLUSEQUAL = 36
MINEQUAL = 37
STAREQUAL = 38
SLASHEQUAL = 39
PERCENTEQUAL = 40
AMPEREQUAL = 41
VBAREQUAL = 42
CIRCUMFLEXEQUAL = 43
LEFTSHIFTEQUAL = 44
RIGHTSHIFTEQUAL = 45
DOUBLESTAREQUAL = 46
DOUBLESLASH = 47
DOUBLESLASHEQUAL = 48
AT = 49
ATEQUAL = 50
RARROW = 51
ELLIPSIS = 52
COLONEQUAL = 53
OP = 54
AWAIT = 55
ASYNC = 56
TYPE_IGNORE = 57
TYPE_COMMENT = 58
# These aren't used by the C tokenizer but are needed for tokenize.py
ERRORTOKEN = 59
COMMENT = 60
NL = 61
ENCODING = 62
N_TOKENS = 63
# Special definitions for cooperation with parser
NT_OFFSET = 256
tok_name = {value: name
for name, value in globals().items()
if isinstance(value, int) and not name.startswith('_')}
__all__.extend(tok_name.values())
EXACT_TOKEN_TYPES = {
'!=': NOTEQUAL,
'%': PERCENT,
'%=': PERCENTEQUAL,
'&': AMPER,
'&=': AMPEREQUAL,
'(': LPAR,
')': RPAR,
'*': STAR,
'**': DOUBLESTAR,
'**=': DOUBLESTAREQUAL,
'*=': STAREQUAL,
'+': PLUS,
'+=': PLUSEQUAL,
',': COMMA,
'-': MINUS,
'-=': MINEQUAL,
'->': RARROW,
'.': DOT,
'...': ELLIPSIS,
'/': SLASH,
'//': DOUBLESLASH,
'//=': DOUBLESLASHEQUAL,
'/=': SLASHEQUAL,
':': COLON,
':=': COLONEQUAL,
';': SEMI,
'<': LESS,
'<<': LEFTSHIFT,
'<<=': LEFTSHIFTEQUAL,
'<=': LESSEQUAL,
'=': EQUAL,
'==': EQEQUAL,
'>': GREATER,
'>=': GREATEREQUAL,
'>>': RIGHTSHIFT,
'>>=': RIGHTSHIFTEQUAL,
'@': AT,
'@=': ATEQUAL,
'[': LSQB,
']': RSQB,
'^': CIRCUMFLEX,
'^=': CIRCUMFLEXEQUAL,
'{': LBRACE,
'|': VBAR,
'|=': VBAREQUAL,
'}': RBRACE,
'~': TILDE,
}
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
| python |
from django.contrib.auth.models import User
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.parsers import JSONParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import DocumentationRenderer
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK
from rest_framework.status import HTTP_201_CREATED
from rest_framework.status import HTTP_204_NO_CONTENT
from rest_framework.viewsets import GenericViewSet
from rest_framework.viewsets import ViewSet
from dependencies import Injector
from dependencies import operation
from dependencies import this
from dependencies import value
from dependencies.contrib.rest_framework import api_view
from dependencies.contrib.rest_framework import generic_api_view
from dependencies.contrib.rest_framework import generic_view_set
from dependencies.contrib.rest_framework import model_view_set
from dependencies.contrib.rest_framework import view_set
from django_project.api.auth import AuthenticateAdmin
from django_project.api.auth import AuthenticateAll
from django_project.api.commands import UserCreateOperations
from django_project.api.commands import UserDestroyOperations
from django_project.api.commands import UserOperations
from django_project.api.commands import UserUpdateOperations
from django_project.api.filtersets import use_filterset_name
from django_project.api.filtersets import UserFilter
from django_project.api.metadata import DenyMetadata
from django_project.api.negotiation import DenyNegotiation
from django_project.api.serializers import UserSerializer
from django_project.api.throttle import ThrottleEveryOne
from django_project.api.version import DenyVersion
@api_view
class UserAction(Injector):
"""Intentionally left blank."""
post = this.command.do
command = UserOperations
renderer_classes = (DocumentationRenderer,)
parser_classes = (JSONParser,)
@api_view
class UserLogin(Injector):
get = this.command.login
command = UserOperations
permission_classes = (IsAuthenticated,)
@api_view
class LoginAll(Injector):
get = this.command.respond
command = UserOperations
authentication_classes = (AuthenticateAll,)
permission_classes = (IsAuthenticated,)
@api_view
class ThrottleAll(Injector):
get = this.command.login
command = UserOperations
throttle_classes = (ThrottleEveryOne,)
@api_view
class DefaultThrottleScope(Injector):
get = this.command.respond
command = UserOperations
throttle_scope = "throttle_scope"
@api_view
class CustomThrottleScope(Injector):
get = this.command.respond
command = UserOperations
custom_throttle_scope = "custom_scope"
@api_view
class BadNegotiation(Injector):
get = this.command.respond
command = UserOperations
content_negotiation_class = DenyNegotiation
@api_view
class BadVersion(Injector):
get = this.command.respond
command = UserOperations
versioning_class = DenyVersion
@api_view
class BadMetadata(Injector):
get = this.command.respond
command = UserOperations
metadata_class = DenyMetadata
@generic_api_view
class UserRetrieveView(Injector):
"""Intentionally left blank."""
get = this.command.retrieve
command = UserOperations
queryset = User.objects.all()
serializer_class = UserSerializer
lookup_field = "username"
lookup_url_kwarg = "nick"
@generic_api_view
class UserListView(Injector):
get = this.command.collection
command = UserOperations
queryset = User.objects.all()
serializer_class = UserSerializer
filter_backends = (DjangoFilterBackend,)
if use_filterset_name:
filterset_class = UserFilter
else:
filter_class = UserFilter
pagination_class = LimitOffsetPagination
@generic_api_view
class UserListFilterFieldsView(Injector):
get = this.command.collection
command = UserOperations
queryset = User.objects.all()
serializer_class = UserSerializer
filter_backends = (DjangoFilterBackend,)
if use_filterset_name:
filterset_fields = ["username"]
else:
filter_fields = ["username"]
pagination_class = LimitOffsetPagination
# ViewSet.
@view_set
class InjectedViewSet(Injector):
"""Intentionally left blank."""
@operation # noqa: A003
def list(view, request, args, kwargs, user, action):
assert isinstance(view, ViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {}
assert action == "list"
return Response(status=HTTP_200_OK, data={"list": "ok"})
@operation
def retrieve(view, request, args, kwargs, user, pk, action):
assert isinstance(view, ViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {"pk": "1"}
assert pk == "1"
assert action == "retrieve"
return Response(status=HTTP_200_OK, data={"retrieve": "ok"})
@operation
def create(view, request, args, kwargs, user, action):
assert isinstance(view, ViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {}
assert action == "create"
return Response(status=HTTP_201_CREATED, data={"create": "ok"})
@operation
def update(view, request, args, kwargs, user, pk, action):
assert isinstance(view, ViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {"pk": "1"}
assert pk == "1"
assert action == "update"
return Response(status=HTTP_200_OK, data={"update": "ok"})
@operation
def partial_update(view, request, args, kwargs, user, pk, action):
assert isinstance(view, ViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {"pk": "1"}
assert pk == "1"
assert action == "partial_update"
return Response(status=HTTP_200_OK, data={"partial_update": "ok"})
@operation
def destroy(view, request, args, kwargs, user, pk, action):
assert isinstance(view, ViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {"pk": "1"}
assert pk == "1"
assert action == "destroy"
return Response(status=HTTP_204_NO_CONTENT)
# GenericViewSet.
@generic_view_set
class InjectedGenericViewSet(Injector):
serializer_class = UserSerializer
@operation # noqa: A003
def list(view, request, args, kwargs, user, action):
assert isinstance(view, GenericViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {}
assert action == "list"
return Response(status=HTTP_200_OK, data={"list": "ok"})
@operation
def retrieve(view, request, args, kwargs, user, pk, action):
assert isinstance(view, GenericViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {"pk": "1"}
assert pk == "1"
assert action == "retrieve"
return Response(status=HTTP_200_OK, data={"retrieve": "ok"})
@operation
def create(view, request, args, kwargs, user, action, validated_data):
assert isinstance(view, GenericViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {}
assert action == "create"
assert validated_data == {
"username": "johndoe",
"first_name": "John",
"last_name": "Doe",
}
return Response(status=HTTP_201_CREATED, data={"create": "ok"})
@operation
def update(view, request, args, kwargs, user, pk, action, validated_data):
assert isinstance(view, GenericViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {"pk": "1"}
assert pk == "1"
assert action == "update"
assert validated_data == {
"username": "johndoe",
"first_name": "John",
"last_name": "Doe",
}
return Response(status=HTTP_200_OK, data={"update": "ok"})
@operation
def partial_update(view, request, args, kwargs, user, pk, action, validated_data):
assert isinstance(view, GenericViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {"pk": "1"}
assert pk == "1"
assert action == "partial_update"
assert validated_data == {"username": "jimworm"}
return Response(status=HTTP_200_OK, data={"partial_update": "ok"})
@operation
def destroy(view, request, args, kwargs, user, pk, action):
assert isinstance(view, GenericViewSet)
assert isinstance(request, Request)
assert args == ()
assert kwargs == {"pk": "1"}
assert pk == "1"
assert action == "destroy"
return Response(status=HTTP_204_NO_CONTENT)
@model_view_set
class UserViewSet(Injector):
"""Intentionally left blank."""
authentication_classes = (AuthenticateAdmin,)
queryset = User.objects.filter(username="johndoe")
serializer_class = UserSerializer
create = this.create_command.create
update = this.update_command.update
destroy = this.destroy_command.destroy
create_command = UserCreateOperations
update_command = UserUpdateOperations
destroy_command = UserDestroyOperations
@model_view_set
class DynamicUserViewSet(Injector):
authentication_classes = (AuthenticateAdmin,)
@value
def queryset(user, action):
assert action in {"list", "retrieve", "update", "partial_update", "destroy"}
assert user.username == "admin"
return User.objects.filter(username="johndoe")
serializer_class = UserSerializer
create = this.create_command.create
update = this.update_command.update
destroy = this.destroy_command.destroy
create_command = UserCreateOperations
update_command = UserUpdateOperations
destroy_command = UserDestroyOperations
@model_view_set
class EmptyViewSet(Injector):
queryset = User.objects.all()
serializer_class = UserSerializer
| python |
from __future__ import print_function
import os
import sys
import curses
import txtsh.log as log
class Cursor(object):
def __init__(self):
self.y = 4
self.x = 0
self.up_limit = 4
self.down_limit = 4
def display(self):
curses.setsyx(self.y, self.x)
curses.doupdate()
class Explorer(object):
"""
A curses-based file explorer.
Returns the path of a file selected.
"""
def __init__(self):
# Status codes
self.STOP = 0
self.GO = 1
self.CHOSEN = 2
# Directory aliases
self.ROOTDIR = '/'
self.HOMEDIR = os.path.expanduser("~")
# Key aliases
self.KEY_QUIT = ord('q')
self.KEY_CHOOSE_FILE = ord('c')
self.KEY_UP = [curses.KEY_UP, ord('k')]
self.KEY_DOWN = [curses.KEY_DOWN, ord('j')]
self.KEY_LEFT = [curses.KEY_LEFT, ord('h')]
self.KEY_RIGHT = [curses.KEY_RIGHT, ord('l')]
# Create a cursor
self.curs = Cursor()
# Start the screen
self.create_scr()
self.path = self.HOMEDIR + '/'
self.ls = os.listdir(self.path)
self.num_listings = len(self.ls)
self.curs.down_limit = self.num_listings + 3
self.current_file = self.ls[self.curs.y - 4]
def create_scr(self):
"""
Start the curses screen.
"""
self.scr = curses.initscr()
curses.noecho()
curses.curs_set(1)
self.scr.keypad(1)
def navigate(self):
"""
The driver function for the file explorer.
Returns the path of a selected file to the txtsh shell.
"""
status = self.GO
c = None
while status == self.GO:
self.scr.erase()
self.scr.addstr(0, 0, "TXTSH FILE EXPLORER")
self.scr.addstr(1, 0, "'c' to choose file, \
'q' to quit without choosing.")
self.list_dir()
self.curs.display()
self.current_file = self.ls[self.curs.y - 4]
c = self.scr.getch()
(status, path) = self.manage_input(c)
curses.endwin()
return path
def manage_input(self, key):
"""
Return status and file path if CHOSEN
to Explorer.navigate(). status is one of GO, STOP, or CHOSEN
"""
status = self.GO
path = None
if key == self.KEY_QUIT:
status = self.STOP
path = None
elif key == self.KEY_CHOOSE_FILE:
status = self.CHOSEN
path = self.path + '/' + self.current_file
elif key in self.KEY_UP:
if self.curs.y == self.curs.up_limit:
pass
else:
self.curs.y -= 1
elif key in self.KEY_DOWN:
if self.curs.y == self.curs.down_limit:
pass
else:
self.curs.y += 1
elif key in self.KEY_RIGHT:
self.build_path()
self.curs.y = 4
elif key in self.KEY_LEFT:
self.shrink_path()
self.curs.y = 4
else:
pass
return (status, path)
def list_dir(self):
"""
List the contents of the current directory
in the explorer screen.
"""
max_y = self.scr.getmaxyx()[0] - 5
# Get the current directory listing.
try:
self.ls = os.listdir(self.path)
# Filter the directory listing to not include hidden files.
self.ls = filter(lambda f: not f.startswith('.'), self.ls)
self.num_listings = len(self.ls)
except Exception:
log.write(traceback=True)
return
# Display the directory listing.
if self.num_listings == 0:
self.scr.addstr(4, 0, '*EMPTY*')
else:
self.scr.addstr(2, 0, self.path)
for i in xrange(len(self.ls)):
if i < max_y:
self.scr.addstr(i + 4, 0, self.ls[i])
self.scr.noutrefresh()
self.curs.down_limit = self.num_listings + 3
def build_path(self):
self.path = os.path.join(self.path, self.current_file)
def shrink_path(self):
self.path = os.path.abspath(os.path.join(self.path, '..'))
if __name__ == '__main__':
expl = Explorer()
path = expl.navigate()
if path:
print(path)
| python |
#!/usr/bin/pypy
from sys import *
from random import *
T, n, p, K0 = map(int, argv[1:]);
print T
for i in xrange(T):
E = []
mark = []
for i in xrange(2, n * p / 100 + 1):
E.append((randint(1, i - 1), i))
mark.append(0)
for i in xrange(n * p / 100 + 1, n + 1):
j = randrange(0, len(E))
u, v = E[j]
if not mark[j]:
E.append((u, i))
mark.append(1)
mark[j] = 1
else:
E[j] = (u, i)
E.append((i, v))
mark.append(1)
shuffle(E)
print n, len(E), randint(0, K0)
print '\n'.join(map(lambda x: "%s %s" % x, E))
| python |
"""
devl.py -- The more cross-platform makefile substitute with setup tools
Usage
=====
For first-time setup of this file, type 'python devl.py'
How to Add Functionality
========================
User Variables
--------------
To add a new user variable, you should
- Update the ``user_variables`` dict in this module with the new key and a default value
This will make the variable available in calls to vformat() and format_and_call()
Commands
--------
To add a new command, you should
- Update the ``commands`` dict in this module
  - The key is the name of the command
  - The value is a list containing other command names and/or functions which the command depends upon
- Prefix any new command functions with 'r_' (for 'run') for consistency
- Add a description of any new command functions using the `describe` decorator
- Avoid pipes, redirects, or otherwise bash-y things for our Windows friends. Instead, just use multiple commands
and rely on Python's baked-in io functionality. See r_dumpdata() and r_pylint() for examples.
"""
import subprocess
import sys
import json
import os
import glob
# The values below are just defaults and will be superseded by devl_settings.json
user_variables = {
"PYTHON": "python3",
"PIP": "sudo pip3",
"MANAGE": "python3 manage.py",
"DEV_DATA_DIR": "fixtures",
"PORT": "8000",
}
if os.name == "nt":
# We're in Windows, so we'll try different defaults
user_variables.update(
{"PYTHON": "python", "PIP": "pip", "MANAGE": "python manage.py"}
)
PROMPT = "\n{question}\n({options}) {default}\n> "
help_commands_list = None # Leave this empty, it is auto-populated
# Helper Functions
def vformat(space_separated):
"""Format a string with global variables"""
return space_separated.format(**user_variables)
def format_and_call(space_separated, **kwargs):
"""Format global variables within ``space_separated`` and then subprocess.run the string, separated by spaces.
Passes ``kwargs`` to subprocess.run. Returns the output of subprocess.run.
Outputs the command executed similar to how make does it.
Note this can be used even if there are no user_variable format specifiers.
Arguments:
space_separated (str): Space-separated command to run
kwargs: (dict): Dict to pass to subprocess.run as **kwargs
"""
cmdstr = vformat(space_separated)
print(cmdstr)
return subprocess.run(cmdstr.split(" "), **kwargs)
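# For example, with the default settings above, format_and_call("{MANAGE} migrate")
# prints and runs "python3 manage.py migrate" (or "python manage.py migrate" on Windows).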
def just_call(complete_arguments, **kwargs):
"""Same functionality as subprocess.run, but prints the command it is executing ahead of time.
This function is different than format_and_call in that it doesn't attempt to substitute user_variables values
and it takes a list of arguments, not a space-separated string.
Arguments:
complete_arguments (list): First parameter of subprocess.run
kwargs (dict): Passed as **kwargs to subprocess.run
"""
print(" ".join(complete_arguments))
return subprocess.run(complete_arguments, **kwargs)
def glob_as_args(glob_str, **kwargs):
"""Outputs a file glob as a verbose space-separated argument string for maximum crossplatforminess.
Arguments:
glob_str (str): A valid glob string used as the first argument to glob.glob
kwargs (dict): Keyword arguments to pass to glob.glob
"""
return " ".join(glob.glob(glob_str, **kwargs))
def prompt_user(
question,
options=("y", "n"),
case_sensitive=False,
default_first=True,
accept_any=False,
):
"""Prompts the user with the given question, providing them with the given options
    Re-prompts the user until the response is in `options`, unless accept_any is True
If options not provided, defaults to y/n.
Returns the user's input (in lowercase if case_sensitive=False)
Arguments:
question (str): The prompt for the user
options (iter(str)): (optional) The tuple/iterable of possible options to answer
case_sensitive (boolean): (optional) Whether or not the responses are case-sensitive
default_first (boolean): (optional) Whether or not an empty input should default to the first option
        accept_any (boolean): Whether to accept any response without checking it against the options; must be True if no options are provided
"""
ops = ""
if options:
ops = "/".join(options)
elif accept_any:
ops = "any response accepted"
assert (
ops != ""
), "prompt_user(): No options are provided, but kwarg accept_any is false. This is probably an error."
default = ""
if options and default_first:
default = " [{default}]".format(default=options[0])
prompt = PROMPT.format(question=question, options=ops, default=default)
result = input(prompt)
if default_first and result == "":
result = options[0]
valid_inputs = options if case_sensitive else [o.lower() for o in options]
while (
result if case_sensitive else result.lower()
) not in valid_inputs and not accept_any:
print("Invalid option.")
result = input(prompt)
return result if case_sensitive else result.lower()
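# For example, prompt_user("Continue?") shows "Continue?" with a "(y/n) [y]" prompt and
# returns "y" if the user just presses Enter, since default_first picks the first option.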
def update_user_vars():
"""Updates `user_vars` with the contents of devl_settings.json"""
with open("devl_settings.json", "r") as settings_file:
try:
user_variables.update(json.load(settings_file))
except json.decoder.JSONDecodeError as e:
print(
"There is an error in your devl_setting.json file. Please fix it and try re-running your command."
)
print("The output is as follows:")
print(e.msg)
should_regen = prompt_user(
"Would you like to just regenerate the file by running the setup script again?"
"\nThe command you tried to execute will not run upon completion, so you have time to change the defaults."
)
if should_regen == "y":
run("setup_devl")
else:
print(
"Okay, devl.py will now exit. Please fix the JSON file and try re-running the command."
)
exit(0)
def describe(description):
"""Decorator used to add descriptions to command functions. Descriptions should be succinct and written in the imperative
Arguments:
description (str): A description of the actions the function takes
"""
def wrapper(cmd_fn):
cmd_fn._help_description_ = description
return cmd_fn
return wrapper
def description_of(cmd_str):
"""Determines the description of the given command by chaining together descriptions of all functions it ends up calling
If a description cannot be found, uses the name of the function.
Arguments:
cmd_str (str): The name of the command to generate a description of
"""
descs = [
description_of(c)
if isinstance(c, str)
else "\t\t* " + getattr(c, "_help_description_", c.__name__)
for c in commands[cmd_str]
]
return "\n".join(descs)
# Command Function Helpers
def download_pip():
"""Tries to use get-pip.py to download pip"""
try:
from urllib import request
from urllib.error import URLError
except ImportError:
print(
"Unfortunately your python installation does not have urllib, so you will have to install pip manually."
)
        print('I\'d recommend googling "how to install pip on [YOUR PLATFORM]"')
print(
"Once you have done so, re-run this script. You may have to restart your computer for the changes to take effect."
)
print("devl.py will now exit.")
exit(0)
try:
get_pip = request.urlopen("https://bootstrap.pypa.io/get-pip.py").read()
except URLError:
print(
"There was an issue fetching get-pip.py. Are you connected to the internet?"
)
print("If not, connect to the internet and re-run this script after doing so.")
print(
"If you are connected yet this is not working, you will probably have to manually install pip."
)
print(
'I\'d reccomend googling "how to install pip on [YOUR PLATFORM]" if that is the case.'
)
print('Once you have done so, re-run "python devl.py"')
print("devl.py will now exit.")
exit(0)
with open("get-pip.py", "wb") as gp_file:
gp_file.write(get_pip)
print("Successfully downloaded get_pip.py")
# Command Functions
@describe("Install all Python requirements using pip")
def r_requirements():
devreqs = os.path.join("requirements", "dev.txt")
format_and_call("{PIP} install -r " + devreqs)
@describe(
"Load fixture data files from fixture directory to database. To ignore a json file, prepend with '.' to hide. To load files in order, prepend 'dev_data...' with the next index starting from 1."
)
def r_loaddata():
files = sorted(os.listdir(user_variables["DEV_DATA_DIR"]), key=os.path.basename)
for f in files:
if f.endswith(".json") and not f.startswith("."):
format_and_call("{MANAGE} loaddata " + os.path.join("fixtures", f))
@describe("Write current database as a fixture")
def r_dumpdata():
complete = format_and_call(
"{MANAGE} dumpdata --natural-foreign -e contenttypes -e auth.Permission",
stdout=subprocess.PIPE,
)
with open(user_variables["DEV_DATA"], "w") as f:
f.write(complete.stdout.text)
@describe("Migrate database with --run-syncdb")
def r_syncdb():
format_and_call("{MANAGE} migrate --run-syncdb")
@describe("Collect static files")
def r_static():
format_and_call("{MANAGE} collectstatic --noinput")
@describe("Compile SASS files, then run collectstatic")
def r_static_prod():
format_and_call("{MANAGE} compilescss")
format_and_call("{MANAGE} collectstatic --noinput")
@describe("Migrate the database")
def r_migrate():
format_and_call("{MANAGE} migrate")
@describe("Run the dev server")
def r_quickrun():
format_and_call("{MANAGE} runserver 0.0.0.0:{PORT}")
@describe("Run tests (not implemented)")
def r_test():
format_and_call("{MANAGE} test")
@describe("Run the pycodestyle checks")
def r_pycodestyle():
format_and_call("pycodestyle common --max-line-length=120")
apps_files = glob_as_args("apps/*")
format_and_call(
"pycodestyle "
+ apps_files
+ " --exclude='apps/*/migrations/*' --max-line-length=120"
)
scripts_path = os.path.abspath("../scripts")
format_and_call("pycodestyle " + scripts_path + " --max-line-length=120")
@describe("Run the pylint checks")
def r_pylint():
format_and_call("pylint common")
apps_files = glob_as_args("apps/*")
format_and_call("pylint " + apps_files)
scripts = os.path.abspath("../scripts")
format_and_call("pylint " + scripts + " --max_line_length=120")
@describe("Create new migrations based on changes to Models")
def r_newmigrations():
format_and_call("{MANAGE} makemigrations")
print(
"Before you commit, remember to rename your migration with a description of what it does (but keep its numerical designator)"
)
print(
'Example: Change "003_auto_20170101_1942.py" to something like "003_add_blacklist.py"'
)
@describe("Create a local admin account")
def r_superuser():
format_and_call("{MANAGE} createsuperuser")
@describe("Open the database shell prompt")
def r_dbshell():
format_and_call("{MANAGE} dbshell")
@describe("Start an interactive python session with convenience imports")
def r_shell():
format_and_call("{MANAGE} shell")
@describe("Destroy the Python environment (not implemented)")
def r_destroyenv():
print("Destructive commands are not yet implemented")
# TODO Destroy the entire python environment based on requirements.txt like the Makefile did?
@describe("Clean static files (not implemented)")
def r_clean():
print("Destructive commands are not yet implemented")
@describe("Display help text")
def r_help():
print(
"devl.py -- The more cross-platform make-substitute for the Sigma Pi Gamma Iota website"
)
print()
print("Usage:")
print("\t$ python devl.py COMMAND")
print()
print("Available commands are:")
print()
assert (
help_commands_list is not None
), "There was an issue populating the list of possible commands"
for c in help_commands_list:
print("\t" + c)
print(description_of(c))
print()
@describe("Run the environment setup script")
def r_setup_devl():
working_settings = {}
print(
"Welcome to devl.py -- The module that makes working with your dev environment suck a little less\n"
)
print(
'This setup script can be re-accessed at any time after it is complete using "python devl.py setup_devl"'
)
if not (sys.version_info > (3, 5)):
print(
"First things first, you need to install a newer version of python. "
"Head over to python.org/downloads and get the latest version for your system."
)
print(
"Once you have done so, add it to your system path and re-run this script using the new version"
)
print("devl.py will now exit. See you soon!")
exit(0)
else:
print(
"Congratulations on running this script with a compatible version of python!"
)
working_settings["PYTHON"] = sys.executable
if not os.path.isfile("devl_settings.json"):
print(
"It has been detected that you have not yet set up devl.py. So let's do that!"
)
print("Let's see if you have pip installed...")
try:
import pip
print("Success!")
        except ImportError:  # covers ModuleNotFoundError on 3.6+ and still works on older 3.x
print("It looks like this python installation does not have pip.")
should_install = prompt_user(
"Would you like devl.py to try to install pip for you?"
" You should not use this option if you installed this python interpereter using a package manager such as apt."
' If you did, just answer "n" and then run "sudo apt install python3-pip" (Ubuntu) or whatever the equivalent on your system is.'
" That will allow your pip installation to be managed by your package manager."
" If you are on Windows or Mac you should probably answer yes, unless you use a package manager such as choco or homebrew,"
" and that package manager has pip3 available."
)
if should_install == "y":
download_pip()
print("Installing pip...")
if os.name == "nt":
format_and_call(working_settings["PYTHON"] + " get-pip.py")
else:
format_and_call("sudo " + working_settings["PYTHON"] + " get-pip.py")
else:
print(
"Okay, please install pip for this interpreter and re-run devl.py once you have done so."
)
print("devl.py will now exit.")
exit(0)
if os.name == "nt":
working_settings["PIP"] = working_settings["PYTHON"] + " -m pip"
else:
working_settings["PIP"] = "sudo " + working_settings["PYTHON"] + " -m pip"
working_settings["MANAGE"] = working_settings["PYTHON"] + " manage.py"
user_variables.update(working_settings)
with open("devl_settings.json", "w") as settings_file:
settings_file.write(json.dumps(user_variables, indent=4))
print("\%\%\%\% NOTICE \%\%\%\%")
print(
"A file called devl_settings.json has been placed in this directory. It contains the default settings determined from this setup process."
)
print(
"If anything in devl.py doesn't work after this, you probably need to change something in that file."
)
print(
"Since you have the proper python toolchain installed, we can now set up the dev environment."
)
should_run_dev = prompt_user(
        'Would you like to run `dev` now to install your environment? (This can be done in the future using "python devl.py dev")'
)
if os.name == "nt":
print(
"Since you are running Windows, you may need to install Visual Studio Dev Tools from https://visualstudio.microsoft.com/downloads/ in order to finish."
)
print(
'We\'ll go ahead and try running dev anyways. If it fails, go to the above link and install "Tools for Visual Studio".'
)
print('Then, restart your computer, and then run "python devl.py dev"')
if should_run_dev == "y":
run("dev")
else:
print(
'Setup is finished! Use "python devl.py dev" if you would like to install the environment in the future.'
)
exit(0)
print("Setup is finished! Check the output above to see if it succeeded.")
print('Use "python devl.py help" to see what else devl.py can do.')
print("devl.py will now exit.")
exit(0) # Explicit exit in case we're here due to malformed JSON
# Core Functionality
commands = {
"dev": ["requirements", "loaddata", "static"],
"requirements": [r_requirements],
"loaddata": ["syncdb", r_loaddata],
"dumpdata": ["syncdb", r_dumpdata],
"syncdb": [r_syncdb],
"run": ["static", "migrate", "quickrun"],
"static": [r_static],
"static_prod": [r_static_prod],
"migrate": [r_migrate],
"quickrun": [r_quickrun],
"test": [r_test],
"quality": ["pycodestyle", "pylint"],
"pycodestyle": [r_pycodestyle],
"pylint": [r_pylint],
"newmigrations": [r_newmigrations],
"superuser": [r_superuser],
"dbshell": [r_dbshell],
"shell": [r_shell],
"destroyenv": ["clean", r_destroyenv],
"clean": [r_clean],
"help": [r_help],
"setup_devl": [r_setup_devl],
}
help_commands_list = sorted(list(commands.keys()))
def run_command(command, executed_functions):
"""Runs the given named command or function.
Functions are evaluated one time at maximum per call
Arguments:
command (str): The name of a command to run
executed_functions (set): A set containing any command functions which have already been called
"""
try:
reqs = commands[command]
except KeyError:
sys.stderr.write('[devl.py] Invalid command "{0}"\n\n'.format(str(command)))
raise
for r in reqs:
try:
if r not in executed_functions:
r()
executed_functions.add(r)
except TypeError:
assert isinstance(
r, str
), "Command list values must be callables or strings"
run_command(r, executed_functions)
except KeyboardInterrupt:
pass
def run(command):
"""Shortcut for first-time call of run_command()"""
run_command(command, set())
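# For example, run("dev") resolves to r_requirements, r_syncdb, r_loaddata and r_static
# (in that order), and each command function is called at most once per run() call.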
if __name__ == "__main__":
if os.path.isfile("devl_settings.json"):
update_user_vars()
if len(sys.argv) == 2:
run(sys.argv[1])
else:
sys.stderr.write(
'[devl.py] Invalid argument format. Use "python devl.py help" for more information.\n'
)
else:
if len(sys.argv) == 2:
sys.stderr.write(
"[devl.py] Notice -- devl.py has not yet been set up. Using default user variables. To fix this, run `python devl.py` with no arguments.\n\n"
)
run(sys.argv[1])
else:
run("setup_devl")
| python |
# Copyright 2021 Ash Hellwig <[email protected]> (https://ash.ashwigltd.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import click
# from ..classes.BargenInstance import BargenInstance
# @click.command()
# @click.option('-a', '--author-name', 'authorname')
# def cli(authorname):
# sourceconfig = BargenInstance()
# sourceconfig.authorname = authorname
# """Example Script"""
# click.echo('Running CLI from scripts/cli.py')
# click.echo('Using config:')
# click.echo('Author Name: ' + sourceconfig.authorname)
# click.echo('Now, lets check if we got the song titles...')
# print('----------------')
# print('Found the following song titles: ')
# print(sourceconfig.getsonglinks())
# print('----------------')
| python |
"""Init module."""
from . import contypes, exceptions, models, repository, service
from .controller import controller
__all__ = [
"controller",
"contypes",
"exceptions",
"models",
"repository",
"service",
]
| python |
import argparse
def str2bool(v):
return v.lower() in ("yes", "y", "true", "t", "1")
parser = argparse.ArgumentParser(description="test global argument parser")
# script parameters
parser.add_argument('-g', '--GT_PATH', default='gt/gt.zip', help="Path of the Ground Truth file.")
parser.add_argument('-s', '--SUBMIT_PATH', default='submit.zip', help="Path of your method's results file.")
# webserver parameters
parser.add_argument('-o', '--OUTPUT_PATH', default='output/', help="Path to a directory where to copy the file that contains per-sample results.")
parser.add_argument('-p', '--PORT', default=8080, help='port number to show')
# result format related parameters
parser.add_argument('--BOX_TYPE', default='QUAD', choices=['LTRB', 'QUAD', 'POLY'])
parser.add_argument('--TRANSCRIPTION', action='store_true')
parser.add_argument('--CONFIDENCES', action='store_true')
parser.add_argument('--CRLF', action='store_true')
# end-to-end related parameters
parser.add_argument('--E2E', action='store_true')
parser.add_argument('--CASE_SENSITIVE', default=True, type=str2bool)
parser.add_argument('--RS', default=True, type=str2bool)
# evaluation related parameters
parser.add_argument('--AREA_PRECISION_CONSTRAINT', type=float, default=0.5)
parser.add_argument('--GRANULARITY_PENALTY_WEIGHT', type=float, default=1.0)
parser.add_argument('--VERTICAL_ASPECT_RATIO_THRES', type=float, default=2.0)
# other parameters
parser.add_argument('-t', '--NUM_WORKERS', default=2, type=int, help='number of threads to use')
parser.add_argument('--GT_SAMPLE_NAME_2_ID', default='([0-9]+)')
parser.add_argument('--DET_SAMPLE_NAME_2_ID', default='([0-9]+)')
parser.add_argument('--PER_SAMPLE_RESULTS', default=True, type=str2bool)
PARAMS = parser.parse_args()
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
default_args = {'owner': 'afroot05', 'retries': 2, 'retry_delay': timedelta(minutes=1)}
dag = DAG('Sync_dayquota_am_task',
default_args=default_args,
schedule_interval='40 9 * * *',
catchup=False,
start_date=datetime(2021, 5, 17, 9, 40))
sync_dayquota = BashOperator(task_id="sync_dayquota", bash_command="sh /lib/carter/dbsync/scripts/sync_dayquota.sh ",
dag=dag)
kdalpha_am_start_task = BashOperator(task_id="kdalpha_am_start_task", bash_command="sh /usr/lib/carter/kd_strategy/script/kdalpha_am_start_task.sh prod ", dag=dag)
kdalpha_daily_am_task = BashOperator(task_id="kdalpha_daily_am_task", bash_command="sh /usr/lib/carter/kd_strategy/script/kdalpha_strategy_daily_am_task.sh prod ", dag=dag)
kdalpha_strategy_rank_task = BashOperator(task_id="kdalpha_strategy_rank_task", bash_command="sh /usr/lib/carter/kd_strategy/script/kdalpha_strategy_rank_task.sh prod ", dag=dag)
kdalpha_am_end_task = BashOperator(task_id="kdalpha_am_end_task", bash_command="sh /usr/lib/carter/kd_strategy/script/kdalpha_am_end_task.sh prod ", dag=dag)
sync_dayquota >> kdalpha_am_start_task
kdalpha_strategy_rank_task >> [kdalpha_am_end_task]
kdalpha_am_start_task >> [kdalpha_daily_am_task]
kdalpha_daily_am_task >> [kdalpha_strategy_rank_task] | python |
from pointing.envs import SimplePointingTask
from pointing.users import CarefulPointer
from pointing.assistants import ConstantCDGain, BIGGain
from coopihc.bundle import PlayNone, PlayAssistant
import matplotlib.pyplot as plt
# ===================== First example =====================
# task = SimplePointingTask(gridsize = 31, number_of_targets = 8)
# binary_user = CarefulPointer()
# unitcdgain = ConstantCDGain(1)
#
# bundle = PlayNone(task, binary_user, unitcdgain)
# game_state = bundle.reset()
# bundle.render('plotext')
# while True:
# sum_rewards, is_done, rewards = bundle.step()
# bundle.render('plotext')
# if is_done:
# bundle.close()
# break
# ===================== Second example =====================
# task = SimplePointingTask(gridsize = 31, number_of_targets = 10)
# user = CarefulPointer()
# assistant = ConstantCDGain(1)
#
# bundle = PlayAssistant(task, user, assistant)
#
# game_state = bundle.reset()
# bundle.render('plotext')
# # The heuristic is as follows: Start with a high gain. The user should always give the same action. If at some point it changes, it means the user went past the target and that the cursor is very close to the target. If that is the case, divide the gain by 2, but never less than 1.
#
# # Start off with a high gain
# gain = 4
# # init for the adaptive algorithm
# sign_flag = game_state["user_action"]['action']['human_values'][0]
# observation = game_state
# _return = 0
# while True:
# # Check whether the user action changed:
# sign_flag = sign_flag * observation["user_action"]['action']['human_values'][0]
# # If so, divide gain by 2
# if sign_flag == -1:
# gain = max(1,gain/2)
# # Apply assistant action
# observation, sum_rewards, is_done, rewards = bundle.step([gain])
# _return += sum_rewards
# bundle.render('plotext')
#
# if is_done:
# bundle.close()
# break
#
# print(_return)
# ================= Third example ======================
task = SimplePointingTask(gridsize=31, number_of_targets=10, mode="position")
binary_user = CarefulPointer()
BIGpointer = BIGGain()
bundle = PlayNone(task, binary_user, BIGpointer)
game_state = bundle.reset()
bundle.render("plotext")
plt.tight_layout()
while True:
sum_rewards, is_done, rewards = bundle.step()
bundle.render("plotext")
if is_done:
break
| python |
from os.path import join
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import argparse
from repro_lap_reg.load_param_seq_results import load_param_seq_results
from repro_lap_reg.viz_seq import plot_param_vs_metric_by_model
from repro_lap_reg.viz_utils import savefig
from repro_lap_reg.utils import join_and_make
parser = argparse.ArgumentParser(description='Visualizations for experiments '
                                             'with n_samples sequence.')
parser.add_argument('--out_data_dir',
default='out_data/',
help='Directory for output data.')
parser.add_argument('--results_dir',
default='results/',
help='Directory where visualizations should be saved.')
parser.add_argument('--kind', default='covar',
help='What kind of model are we looking at.')
parser.add_argument('--base_name', default='meow',
help='Base name to identify simulations.')
args = parser.parse_args()
# args.kind = 'means_est'
# args.base_name = 'bsize=5_2'
show_std = True
dpi = 100
inches = 12
fontsize = 32 # 22
tick_font_size = 20
plt.rc('legend', fontsize=fontsize)
param = 'n_samples'
param_title = "Number of samples"
metrics = ['L2_rel', 'support_error']
metric_best_ordering = {'L2': 'min',
'support_error': 'min',
'L2_rel': 'min'}
metric_ylims = {'L2': 0,
'support_error': (0, 1),
'L2_rel': (0, 1)}
metric_titles = {'L2_rel': 'Relative L2 error',
'L2': 'L2 error',
'support_error': 'Support error'}
#########
# paths #
#########
results_dir = args.results_dir
out_data_dir = args.out_data_dir
out_data_dir = join(out_data_dir, args.kind)
save_dir = join_and_make(results_dir, 'for_paper')
name_stub = '{}__{}'.format(args.kind, args.base_name)
def get_block_size_title(name):
    # TODO: this won't work for more sophisticated names
block_size_str = name.split('bsize=')[1]
vals = block_size_str.split('_')
assert len(vals) in [2, 3]
if len(vals) == 2:
n_nodes = vals[0]
n_blocks = vals[1]
return "{} blocks with {} nodes".format(n_blocks, n_nodes)
elif len(vals) == 3:
n_nodes = vals[0]
n_blocks = vals[1]
n_iso = vals[2]
return "{} blocks with {} nodes and {} isolated vertices".\
format(n_blocks, n_nodes, n_iso)
title_stub = get_block_size_title(args.base_name)
##########################
# load and parse results #
##########################
out = load_param_seq_results(base_name=args.base_name,
param=param,
out_data_dir=out_data_dir)
fit_results = out['fit']
path_results = out['path']
if args.kind == 'means_est':
defualt_init_name = '10-CV hard-threshold init'
all_models = [
'fclsp__init=default__steps=2',
'fclsp__init=default__steps=convergence',
'fclsp__init=0__steps=3',
'fclsp__init=0__steps=convergence',
'fclsp__init=empirical__steps=2',
'fclsp__init=empirical__steps=convergence',
'thresh__kind=hard',
#'thresh__kind=soft',
'empirical'
]
figsize = (inches, 1.5 * inches)
elif args.kind == 'covar':
defualt_init_name = '10-CV hard-threshold init'
all_models = [
'fclsp__init=default__steps=2',
'thresh__kind=hard',
# 'thresh__kind=soft'
'empirical'
]
# all_models = [
# 'fclsp__init=default__steps=2',
# 'fclsp__init=default__steps=convergence',
# 'fclsp__init=0__steps=3',
# 'fclsp__init=0__steps=convergence',
# 'fclsp__init=empirical__steps=2',
# 'fclsp__init=empirical__steps=convergence',
# 'thresh__kind=hard',
# 'thresh__kind=soft',
# 'empirical'
# ]
figsize = (inches, inches)
elif args.kind == 'lin_reg':
defualt_init_name = '10-CV Lasso init'
all_models = [
'fclsp__init=default__steps=2',
'fcp__init=default__steps=1',
'lasso'
]
# all_models = [
# 'fclsp__init=default__steps=2',
# 'fclsp__init=default__steps=convergence',
# 'fclsp__init=0__steps=3',
# 'fclsp__init=0__steps=convergence',
# 'fclsp__init=empirical__steps=2',
# 'fclsp__init=empirical__steps=convergence',
# 'fcp__init=default__steps=1',
# 'fcp__init=default__steps=convergence',
# 'fcp__init=0__steps=2',
# 'fcp__init=0__steps=convergence',
# 'fcp__init=empirical__steps=1',
# 'fcp__init=empirical__steps=convergence',
# 'lasso'
# ]
figsize = (inches, inches)
elif args.kind == 'log_reg':
defualt_init_name = '10-CV Lasso init'
all_models = [
'fclsp__init=default__steps=2',
'fcp__init=default__steps=1',
'lasso'
]
# all_models = [
# 'fclsp__init=default__steps=2',
# 'fclsp__init=default__steps=convergence',
# 'fclsp__init=0__steps=3',
# 'fclsp__init=0__steps=convergence',
# 'fclsp__init=empirical__steps=2',
# 'fclsp__init=empirical__steps=convergence',
# 'fcp__init=default__steps=1',
# 'fcp__init=default__steps=convergence',
# 'fcp__init=0__steps=2',
# 'fcp__init=0__steps=convergence',
# 'fcp__init=empirical__steps=1',
# 'fcp__init=empirical__steps=convergence',
# 'lasso'
# ]
figsize = (inches, inches)
model_color_seq = sns.color_palette(palette='colorblind', n_colors=4)
fclsp_sub_colors = sns.light_palette(color=model_color_seq[0],
n_colors=2+1,
reverse=True)[:-1]
fcp_sub_colors = sns.light_palette(color=model_color_seq[1],
n_colors=2+1,
reverse=True)[:-1]
info = {}
markers = {'fclsp': '.',
'fcp': 'X',
'0': '$O$',
'empirical': '$e$',
'other': '.'
}
# FCLSP
info['fclsp__init=default__steps=2'] = {
'name': 'FCLS, 2 LLA steps,\n {}'.format(defualt_init_name),
'color': fclsp_sub_colors[0],
'ls': '-',
'marker': markers['fclsp'],
'path': True
}
info['fclsp__init=default__steps=convergence'] = {
'name': 'FCLS, LLA converge,\n {}'.format(defualt_init_name),
'color': fclsp_sub_colors[1],
'ls': '-',
'marker': markers['fclsp'],
'path': True
}
info['fclsp__init=0__steps=3'] = {
'name': 'FCLS, 3 LLA steps,\n init at 0',
'color': fclsp_sub_colors[0],
'ls': '-',
'marker': markers['0'],
'path': True
}
info['fclsp__init=0__steps=convergence'] = {
'name': 'FCLS, LLA converge,\n init at 0',
'color': fclsp_sub_colors[1],
'ls': '-',
'marker': markers['0'],
'path': True
}
info['fclsp__init=empirical__steps=2'] = {
'name': 'FCLS, 2 LLA steps,\n empirical init',
'color': fclsp_sub_colors[0],
'ls': '-',
'marker': markers['empirical'],
'path': True
}
info['fclsp__init=empirical__steps=convergence'] = {
'name': 'FCLS, LLA converge,\n empirical init',
'color': fclsp_sub_colors[1],
'ls': '-',
'marker': markers['empirical'],
'path': True
}
# FCP
info['fcp__init=default__steps=1'] = {
'name': 'SCAD, 1 LLA step,\n {}'.format(defualt_init_name),
'color': fcp_sub_colors[0],
'ls': '--',
'marker': '$S$', # markers['fcp'],
'path': True
}
info['fcp__init=default__steps=convergence'] = {
'name': 'SCAD, LLA converge,\n {}'.format(defualt_init_name),
'color': fcp_sub_colors[1],
'ls': '--',
'marker': markers['fcp'],
'path': True
}
info['fcp__init=0__steps=2'] = {
'name': 'SCAD, 2 LLA steps,\n init at 0',
'color': fcp_sub_colors[0],
'ls': '--',
'marker': markers['0'],
'path': True
}
info['fcp__init=0__steps=convergence'] = {
'name': 'SCAD, LLA converge,\n init at 0',
'color': fcp_sub_colors[1],
'ls': '--',
'marker': markers['0'],
'path': True
}
info['fcp__init=empirical__steps=1'] = {
'name': 'Entrywise SCAD, 1 LLA step,\n empirical init',
'color': fcp_sub_colors[0],
'ls': '--',
'marker': markers['empirical'],
'path': True
}
info['fcp__init=empirical__steps=convergence'] = {
'name': 'SCAD, LLA converge,\n empirical init',
'color': fcp_sub_colors[1],
'ls': '--',
'marker': markers['empirical'],
'path': True
}
#########
# Other #
#########
info['lasso'] = {
'name': 'Lasso',
'color': model_color_seq[3],
'ls': '-.',
'marker': '$L$',
'path': False
}
info['thresh__kind=hard'] = {
'name': 'hard-thresholding',
'color': fcp_sub_colors[0],
'ls': '--',
'marker': '$H$',
'path': True
}
info['thresh__kind=soft'] = {
'name': 'soft-thresholding',
'color': model_color_seq[3],
'ls': '-.',
'marker': markers['other'],
'path': True
}
info['empirical'] = {
'name': 'Empirical',
'color': model_color_seq[3],
'ls': '-.',
'marker': '$E$', # markers['other'],
'path': False
}
for k in list(info.keys()):
if k not in all_models:
del info[k]
formal_names = {k: info[k]['name'] for k in info.keys()}
model_ls = {k: info[k]['ls'] for k in info.keys()}
model_markers = {k: info[k]['marker'] for k in info.keys()}
model_colors = {k: info[k]['color'] for k in info.keys()}
path_models = [k for k in info.keys() if info[k]['path']]
fit_models = [k for k in info.keys() if not info[k]['path']]
###############
# make figure #
###############
metric = 'L2_rel'
vs = 'oracle'
# pull out the best result for the tuning path
best_results = path_results.\
query("vs == @vs").\
groupby(['model', 'mc_idx', 'n_samples'])[metric].\
agg(metric_best_ordering[metric]).\
reset_index()
# get results for models we actually are going to plot
results_to_plot = pd.concat([best_results.query('model in @all_models'),
fit_results.query("vs == @vs and model in @fit_models")]).\
reset_index()
plt.figure(figsize=figsize)
plot_param_vs_metric_by_model(results=results_to_plot,
grp_var=param,
metric=metric,
colors=model_colors,
show_std=show_std,
ls=model_ls,
markers=model_markers,
marker_size=fontsize,
label_dict=formal_names)
plt.ylim(metric_ylims[metric])
plt.ylabel('{} to {}'.format(metric_titles[metric], vs),
fontsize=fontsize)
plt.xlabel(param_title, fontsize=fontsize)
plt.xticks(fontsize=tick_font_size)
plt.yticks(fontsize=tick_font_size)
plt.title(title_stub, fontsize=fontsize)
savefig(join(save_dir, '{}__vs__{}__{}.png'.format(name_stub, vs, metric)),
dpi=dpi)
| python |
from collections import Counter
from dataclasses import dataclass
from itertools import zip_longest
@dataclass
class Point:
x: int
y: int
def __hash__(self):
return hash((self.x, self.y))
@dataclass
class Segment:
a: Point
b: Point
def orthogonal(self):
return self.a.x == self.b.x or self.a.y == self.b.y
def points(self):
x = (lambda step: list(range(self.a.x, self.b.x + step, step)))((-1, 1)[self.b.x >= self.a.x])
y = (lambda step: list(range(self.a.y, self.b.y + step, step)))((-1, 1)[self.b.y >= self.a.y])
return [(x, y) for x, y in zip_longest(x, y, fillvalue=(self.a.y, self.a.x)[len(x) == 1])]
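# For reference: Segment.points() steps one unit at a time along each axis, e.g.
# Segment(Point(0, 0), Point(2, 2)).points() == [(0, 0), (1, 1), (2, 2)]; for an
# axis-aligned segment the constant coordinate is supplied via zip_longest's fillvalue.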
def main(segments):
# Part I
c = Counter()
for segment in list(filter(lambda s: s.orthogonal(), segments)):
for point in segment.points():
c[point] += 1
print(f'Part 1: {len(list(filter(lambda k: c[k] > 1, c)))}')
# Part II
c = Counter()
for segment in segments:
for point in segment.points():
c[point] += 1
print(f'Part 2: {len(list(filter(lambda k: c[k] > 1, c)))}')
if __name__ == '__main__':
main(
list(
map(
lambda x:
Segment(*list(map(lambda y: Point(*list(map(lambda z: int(z.strip()), y.split(',')))), x.split('->')))),
open('input/5.txt').readlines()
)
)
)
| python |
# Config
# If you know what you're doing, feel free to edit the code.
# Bot Token
# Token of the Bot which you want to use.
TOKEN = ""
# Log File
# Where all the logs of everything are stored.
# Default: "logs.txt"
LOG_FILE = "logs.txt"
# File where the codes are stored.
# Codes are handed out one per line, so make sure each code is on its own line.
# Default: "codes.txt"
CODES_FILE = "codes.txt"
# Role ID
# This is the ID of the role which is allowed to use the gen.
ROLE_ID = 867366769392091157
# Cooldown
# This is the seconds cooldown each user has per usage.
# 86400 is a day / 3600 is an hour
COOLDOWN = 86400
# imports here
import asyncio
import discord
from discord.ext import commands
import random
import aiofiles
import time
from datetime import datetime
from colorama import Fore, init
init(autoreset=True)
gen_role = None
bot = commands.Bot(command_prefix="-", intents=discord.Intents.all(), case_insensitive=True) # prefix here
async def getEmbed(type, arg=None): # change colours if you want to here
if type == 0:
embed = discord.Embed(title="Sent you a code.", description="Check your DMs.", colour=discord.Colour.green())
return embed
elif type == 1:
embed = discord.Embed(title="Here's your Generated Code.", description=arg, colour=discord.Colour.blue())
return embed
elif type == 2:
embed = discord.Embed(title="Out of stock.", description="Generator is out of stock.", colour=discord.Colour.red())
return embed
elif type == 3:
embed = discord.Embed(title="Timeout.", description=f"You are on timeout, retry in **{arg}**.", colour=discord.Colour.red())
return embed
elif type == 4:
embed = discord.Embed(title="No Perms.", description=f"You do not have permission to execute this command.", colour=discord.Colour.red())
return embed
async def convert(seconds):
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return "%dh %2dm %2ds" % (hour, minutes, seconds)
async def log(event, user=None, info=None): # logging in log.txt if you want to edit them
now = datetime.now()
timedata = f"{now.strftime('%Y-%m-%d %H:%M:%S')}"
writeable = ""
if event == "generated":
writeable += "[ GENERATE ] "
elif event == "cooldown":
writeable += "[ COOLDOWN ] "
elif event == "no stock":
writeable += "[ NO STOCK ] "
elif event == "no dms":
writeable += "[ NO DMS ] "
elif event == "bootup":
writeable += "\n[ BOOTUP ] "
elif event == "ping":
writeable += "[ PING ] "
elif event == "no perms":
writeable += "[ NO PERMS ] "
elif event == "userinfo":
writeable += "[ USERINFO ] "
elif event == "error":
writeable += "[ CRITICAL ] "
writeable += timedata
try:
writeable += f" ID: {user.id} User: {user.name}#{user.discriminator} // "
except:
writeable += f" // "
if event == "generated":
info = info.strip('\n')
writeable += f"User was successfully sent code: {info}"
elif event == "cooldown":
writeable += f"User couldn't be sent a code as they are on a cooldown of {info}."
elif event == "no stock":
writeable += f"User couldn't be sent a code as there is no stock."
elif event == "no dms":
writeable += f"User couldn't be sent a code as their DMs were disabled."
elif event == "bootup":
writeable += "Bot was turned on."
elif event == "ping":
writeable += "User used the ping command."
elif event == "no perms":
writeable += f"User does not have the significant permissions for the {info} command."
elif event == "userinfo":
writeable += f"User used the userinfo command on: {info}"
elif event == "error":
writeable += info
async with aiofiles.open(LOG_FILE, mode="a") as file:
await file.write(f"\n{writeable}")
if writeable.startswith("[ NO STOCK ]"):
print(Fore.LIGHTYELLOW_EX + writeable.strip('\n'))
elif writeable.startswith("[ CRITICAL ]"):
for x in range(3):
print(Fore.LIGHTRED_EX + writeable.strip('\n'))
elif writeable.startswith("[ BOOTUP ]"):
print(Fore.LIGHTGREEN_EX + writeable.strip('\n'))
@bot.event
async def on_ready():
global gen_role
try:
open(LOG_FILE, "x").close()
except:
pass
try:
open(CODES_FILE, "x").close()
except:
pass
await log("bootup")
for guild in bot.guilds:
role = guild.get_role(ROLE_ID)
if role != None:
gen_role = role
break
if gen_role == None:
await log("error", user=None, info=f"Cannot fetch role ({ROLE_ID}) from {bot.guilds[0].name}. Exiting in 5 seconds.")
await asyncio.sleep(5)
exit()
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandOnCooldown):
time_retry = await convert(error.retry_after)
await ctx.send(content = ctx.author.mention, embed = await getEmbed(3, time_retry))
await log("cooldown", ctx.author, time_retry)
elif isinstance(error, commands.MissingRole):
await ctx.send(content = ctx.author.mention, embed = await getEmbed(4))
await log("no perms", ctx.author, "generate")
@bot.command()
@commands.cooldown(1, COOLDOWN)  # 1 use per cooldown; COOLDOWN is the cooldown time in seconds
@commands.has_role(ROLE_ID) # role for gen perms
@commands.guild_only()
async def generate(ctx):
try:
dm_msg = await ctx.author.send("Processing your request...")
except:
embed = discord.Embed(title="DMs are disabled!", description="Your dms are disabled. Enable them in Privacy Settings.", colour=discord.Colour.red())
embed.set_image(url="https://cdn.discordapp.com/attachments/829087959331897364/850841491470548992/ezgif-2-ca6ebd5d9cfb.gif")
await ctx.send(content=ctx.author.mention, embed=embed)
await log("no dms", ctx.author)
return
async with aiofiles.open("codes.txt", mode="r") as file: # name of codes file
file_lines = await file.readlines()
try:
code = random.choice(file_lines)
except:
await dm_msg.edit(embed=await getEmbed(type=2), content=ctx.author.mention)
await ctx.send(embed=await getEmbed(type=2), content=ctx.author.mention)
bot.get_command("generate").reset_cooldown(ctx)
await log("no stock", ctx.author)
return
else:
file_lines.remove(code)
async with aiofiles.open("codes.txt", mode="w") as file: # name of codes file
for line in file_lines:
if file_lines[-1] != line:
await file.write(line)
else:
await file.write(line.strip("\n"))
await dm_msg.edit(embed=await getEmbed(type=1,arg=code), content=ctx.author.mention)
await ctx.send(embed=await getEmbed(type=0), content=ctx.author.mention)
await log("generated", ctx.author, code)
@bot.command()
async def userinfo(ctx, *, user : discord.Member = None):
if user == None:
user = ctx.author
if gen_role in user.roles:
des = f"Generator: `🟢`"
else:
des = f"Generator: `🔴`"
embed = discord.Embed(color=discord.Colour.blue(), description=des, title=" ")
embed.set_author(name=f"{user.name}#{user.discriminator}", icon_url=user.default_avatar_url)
await ctx.send(embed=embed, content=ctx.author.mention)
await log("userinfo", user=ctx.author, info=f"{user.name}#{user.discriminator}")
@bot.command()
async def ping(ctx):
embed = discord.Embed(title="Response Times", color=discord.Colour.blue()) # colour of ping command
embed.add_field(name="API", value=f"`Loading...`")
embed.add_field(name="Websocket", value=f"`{int(bot.latency * 1000)}ms`")
time_before = time.time()
edit = await ctx.send(embed=embed, content=f"{ctx.author.mention}")
time_after = time.time()
difference = int((time_after - time_before) * 1000)
embed = discord.Embed(title="Response Times", color=discord.Colour.green()) # colour of ping command
embed.add_field(name="API", value=f"`{difference}ms`")
embed.add_field(name="Websocket", value=f"`{int(bot.latency * 1000)}ms`")
await edit.edit(embed=embed, content=f"{ctx.author.mention}")
await log("ping", ctx.author)
bot.run(TOKEN)
| python |
import discord
from discord.ext import commands
class ListMyGames(commands.Cog):
def __init__(self, client):
self.client = client
self.db = self.client.firestoreDb
@commands.command(brief="Lists games you have already acquired")
async def listmygames(self, ctx):
await ctx.author.send("One moment, grabbing a list of games.")
keys_refs = self.db.collection('user').document(str(ctx.author.id)).collection('keys').stream()
appendedGamesStr = ""
        # stream() returns a generator (always truthy), so build the string first and check it afterwards
        for game in keys_refs:
            appendedGamesStr = appendedGamesStr + (f'{game.id}\n')
        if appendedGamesStr:
            await ctx.author.send(f"""```{appendedGamesStr}```""")
        else:
            await ctx.author.send("Your game list is empty.")
def setup(client):
client.add_cog(ListMyGames(client)) | python |
# Validating phone numbers
# Problem Link: https://www.hackerrank.com/challenges/validating-the-phone-number/problem
import re
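# A number is valid if it is exactly ten digits long and starts with 7, 8 or 9 (^[789]\d{9}$).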
for _ in range(int(input())):
print("YES" if re.match("^[789]\d{9}$", input()) else "NO")
| python |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
names=['AGE','TB','DB','TP','Albumin','A/G','sgpt','sgot','ALKPHOS','GENDER']
dataset=pd.read_csv("Indian Liver Patient Dataset.csv")
## Remove rows with NaN values (the provided dataset is incomplete)
dataset1=dataset.dropna(subset = ['AGE','TB','DB','TP','Albumin','A/G','sgpt','sgot','ALKPHOS','GENDER'])
X=dataset1.iloc[:,:-1].values  # all columns except the last one
y=dataset1.iloc[:,8].values
y=y.astype('int')  # cast labels to int (avoids continuous-target errors)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Instantiate the classifier
gnb = MultinomialNB()
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
print('Model Score:',gnb.score(X_train,y_train))
mean_survival=np.mean(X_train)
mean_not_survival=100-mean_survival
print("SUCCESS = {:03.2f}%, FAILURE = {:03.2f}%".format(mean_survival,mean_not_survival))
from sklearn.metrics import classification_report, confusion_matrix
print('||CONFUSION_MATRIX||')
print(confusion_matrix(y_test, y_pred))
print('\n')
print('||CLASSIFICATION_REPORT||')
print(classification_report(y_test, y_pred))
| python |
"""
taskcat python module
"""
from ._cfn.stack import Stack # noqa: F401
from ._cfn.template import Template # noqa: F401
from ._cli import main # noqa: F401
from ._config import Config # noqa: F401
__all__ = ["Stack", "Template", "Config", "main"]
| python |
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import unittest
from io import StringIO
from botocore.exceptions import ClientError
try:
from unittest import mock
except ImportError:
import mock
from .utils import MockPath, MockSession
from heaviside.activities import TaskMixin, ActivityMixin
from heaviside.activities import fanout, fanout_nonblocking, SFN
from heaviside.exceptions import ActivityError
# Suppress message about no handler for log messages
import logging
log = logging.getLogger("heaviside.activities")
log.addHandler(logging.NullHandler())
#log.addHandler(logging.StreamHandler())
class TimeoutError(ClientError):
def __init__(self):
op_name = "Test"
err_rsp = {
'Error': {
'Code': 'TaskTimedOut'
}
}
super(TimeoutError, self).__init__(err_rsp, op_name)
class BossError(Exception):
pass
class TestFanout(unittest.TestCase):
@mock.patch.object(SFN, 'create_name')
@mock.patch('heaviside.activities.time.sleep')
def test_args_generator(self, mSleep, mCreateName):
mCreateName.return_value = 'ZZZ'
iSession = MockSession()
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'stateMachineArn': 'XXX'
}]
}
client.start_execution.return_value = {
'executionArn': 'YYY'
}
client.describe_execution.return_value = {
'status': 'SUCCEEDED',
'output': 'null'
}
expected = [None]
actual = fanout(iSession,
'XXX',
(i for i in range(0,1)))
self.assertEqual(actual, expected)
calls = [
mock.call.list_state_machines(),
mock.call.start_execution(stateMachineArn = 'XXX',
name = 'ZZZ',
input = '0'),
mock.call.list_state_machines(),
mock.call.describe_execution(executionArn = 'YYY')
]
self.assertEqual(client.mock_calls, calls)
@mock.patch.object(SFN, 'create_name')
@mock.patch('heaviside.activities.time.sleep')
def test_args_list(self, mSleep, mCreateName):
mCreateName.return_value = 'ZZZ'
iSession = MockSession()
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'stateMachineArn': 'XXX'
}]
}
client.start_execution.return_value = {
'executionArn': 'YYY'
}
client.describe_execution.return_value = {
'status': 'SUCCEEDED',
'output': 'null'
}
expected = [None]
actual = fanout(iSession,
'XXX',
[i for i in range(0,1)])
self.assertEqual(actual, expected)
calls = [
mock.call.list_state_machines(),
mock.call.start_execution(stateMachineArn = 'XXX',
name = 'ZZZ',
input = '0'),
mock.call.list_state_machines(),
mock.call.describe_execution(executionArn = 'YYY')
]
self.assertEqual(client.mock_calls, calls)
@mock.patch.object(SFN, 'create_name')
@mock.patch('heaviside.activities.time.sleep')
def test_gt_concurrent(self, mSleep, mCreateName):
mCreateName.return_value = 'ZZZ'
iSession = MockSession()
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'stateMachineArn': 'XXX'
}]
}
client.start_execution.return_value = {
'executionArn': 'YYY'
}
client.describe_execution.return_value = {
'status': 'SUCCEEDED',
'output': 'null'
}
expected = [None, None]
actual = fanout(iSession,
'XXX',
[i for i in range(0,2)],
max_concurrent=1)
self.assertEqual(actual, expected)
calls = [
mock.call.list_state_machines(),
mock.call.start_execution(stateMachineArn = 'XXX',
name = 'ZZZ',
input = '0'),
mock.call.list_state_machines(),
mock.call.describe_execution(executionArn = 'YYY'),
mock.call.start_execution(stateMachineArn = 'XXX',
name = 'ZZZ',
input = '1'),
mock.call.list_state_machines(),
mock.call.describe_execution(executionArn = 'YYY'),
]
self.assertEqual(client.mock_calls, calls)
@mock.patch.object(SFN, 'create_name')
@mock.patch('heaviside.activities.time.sleep')
def test_sfn_error(self, mSleep, mCreateName):
mCreateName.return_value = 'ZZZ'
iSession = MockSession()
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'stateMachineArn': 'XXX'
}]
}
client.start_execution.side_effect = [
{ 'executionArn': 'YYY' },
{ 'executionArn': 'YYYY' }
]
client.describe_execution.side_effect = [
{ 'status': 'FAILED' },
{ 'status': 'RUNNING' }
]
client.get_execution_history.return_value = {
'events':[{
'executionFailedEventDetails':{
'error': 'error',
'cause': 'cause'
}
}]
}
try:
fanout(iSession,
'XXX',
[i for i in range(0,2)])
self.assertFalse(True, "fanout should result in an ActivityError")
except ActivityError as e:
self.assertEqual(e.error, 'error')
self.assertEqual(e.cause, 'cause')
calls = [
mock.call.list_state_machines(),
mock.call.start_execution(stateMachineArn = 'XXX',
name = 'ZZZ',
input = '0'),
mock.call.start_execution(stateMachineArn = 'XXX',
name = 'ZZZ',
input = '1'),
mock.call.list_state_machines(),
mock.call.describe_execution(executionArn = 'YYY'),
mock.call.get_execution_history(executionArn = 'YYY',
reverseOrder = True),
mock.call.describe_execution(executionArn = 'YYYY'),
mock.call.stop_execution(executionArn = 'YYYY',
error = "Heaviside.Fanout",
cause = "Sub-process error detected")
]
self.assertEqual(client.mock_calls, calls)
class TestFanoutNonBlocking(unittest.TestCase):
@mock.patch.object(SFN, 'create_name')
@mock.patch('heaviside.activities.time.sleep')
def test_gt_concurrent(self, mSleep, mCreateName):
mCreateName.return_value = 'ZZZ'
iSession = MockSession()
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'stateMachineArn': 'XXX'
}]
}
client.start_execution.return_value = {
'executionArn': 'YYY'
}
client.describe_execution.return_value = {
'status': 'SUCCEEDED',
'output': 'null'
}
args = {
'sub_sfn': 'XXX',
'common_sub_args': {},
'sub_args': [i for i in range(0,2)],
'max_concurrent': 1,
'rampup_delay': 10,
'rampup_backoff': 0.5,
'status_delay': 0,
'finished': False,
'running': [],
'results': [],
}
args1 = fanout_nonblocking(args, iSession)
self.assertFalse(args1['finished'])
self.assertEqual(args1['running'], ['YYY'])
self.assertEqual(args1['results'], [])
self.assertEqual(args1['rampup_delay'], 5)
args2 = fanout_nonblocking(args, iSession)
self.assertFalse(args2['finished'])
self.assertEqual(args2['running'], ['YYY'])
self.assertEqual(args2['results'], [None])
self.assertEqual(args2['rampup_delay'], 2)
args3 = fanout_nonblocking(args, iSession)
self.assertTrue(args3['finished'])
self.assertEqual(args3['running'], [])
self.assertEqual(args3['results'], [None, None])
self.assertEqual(args3['rampup_delay'], 2) # no processes launched
calls = [
mock.call.list_state_machines(),
mock.call.start_execution(stateMachineArn = 'XXX',
name = 'ZZZ',
input = '0'),
mock.call.list_state_machines(),
mock.call.describe_execution(executionArn = 'YYY'),
mock.call.start_execution(stateMachineArn = 'XXX',
name = 'ZZZ',
input = '1'),
mock.call.list_state_machines(),
mock.call.describe_execution(executionArn = 'YYY'),
]
self.assertEqual(client.mock_calls, calls)
class TestTaskMixin(unittest.TestCase):
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_success(self, mCreateSession):
iSession = MockSession()
client = iSession.client('stepfunctions')
mCreateSession.return_value = (iSession, '123456')
task = TaskMixin()
task.token = 'token'
self.assertEqual(task.token, 'token')
task.success(None)
self.assertEqual(task.token, None)
call = mock.call.send_task_success(taskToken = 'token',
output = 'null')
self.assertEqual(client.mock_calls, [call])
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_success_no_token(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
task = TaskMixin()
with self.assertRaises(Exception):
task.success(None)
self.assertEqual(task.token, None)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_success_timeout(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.send_task_success.side_effect = TimeoutError()
task = TaskMixin()
task.token = 'token'
task.success(None)
self.assertEqual(task.token, None)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_failure(self, mCreateSession):
iSession = MockSession()
client = iSession.client('stepfunctions')
mCreateSession.return_value = (iSession, '123456')
task = TaskMixin()
task.token = 'token'
self.assertEqual(task.token, 'token')
task.failure(None, None)
self.assertEqual(task.token, None)
call = mock.call.send_task_failure(taskToken = 'token',
error = None,
cause = None)
self.assertEqual(client.mock_calls, [call])
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_failure_no_token(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
task = TaskMixin()
with self.assertRaises(Exception):
task.failure(None, None)
self.assertEqual(task.token, None)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_failure_timeout(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.send_task_failure.side_effect = TimeoutError()
task = TaskMixin()
task.token = 'token'
task.failure(None, None)
self.assertEqual(task.token, None)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_heartbeat(self, mCreateSession):
iSession = MockSession()
client = iSession.client('stepfunctions')
mCreateSession.return_value = (iSession, '123456')
task = TaskMixin()
task.token = 'token'
task.heartbeat()
call = mock.call.send_task_heartbeat(taskToken = 'token')
self.assertEqual(client.mock_calls, [call])
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_heartbeat_no_token(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
task = TaskMixin()
with self.assertRaises(Exception):
task.heartbeat()
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_run_function(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
task = TaskMixin()
task.handle_task('token', None)
self.assertEqual(task.token, None)
call = mock.call.send_task_success(taskToken = 'token',
output = 'null')
self.assertEqual(client.mock_calls, [call])
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_run_generator(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
def target(input_):
yield
yield
return
# Just make sure the target is actually a generator
self.assertEqual(type(target(None)), types.GeneratorType)
task = TaskMixin(process = target)
task.handle_task('token', None)
self.assertEqual(task.token, None)
call = mock.call.send_task_success(taskToken = 'token',
output = 'null')
call_ = mock.call.send_task_heartbeat(taskToken = 'token')
calls = [call_, call_, call]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_run_activity_error(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
target = mock.MagicMock()
target.side_effect = ActivityError('error', 'cause')
task = TaskMixin(process = target)
task.handle_task('token', None)
self.assertEqual(task.token, None)
call = mock.call.send_task_failure(taskToken = 'token',
error = 'error',
cause = 'cause')
self.assertEqual(client.mock_calls, [call])
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_run_exception(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
target = mock.MagicMock()
target.side_effect = BossError('cause')
task = TaskMixin(process = target)
task.handle_task('token', None)
self.assertEqual(task.token, None)
call = mock.call.send_task_failure(taskToken = 'token',
error = 'BossError',
cause = 'cause')
self.assertEqual(client.mock_calls, [call])
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_run_timeout(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
target = mock.MagicMock()
target.side_effect = TimeoutError()
task = TaskMixin(process = target)
task.handle_task('token', None)
self.assertEqual(task.token, None)
self.assertEqual(client.mock_calls, [])
class TestActivityMixin(unittest.TestCase):
"""
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_constructor(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_activities.return_value = {
'activities':[{
'name': 'name',
'activityArn': 'XXX'
}]
}
activity = ActivityProcess('name', None)
self.assertEqual(activity.arn, 'XXX')
calls = [
mock.call.list_activities()
]
self.assertEqual(client.mock_calls, calls)
"""
# DP ???: How to test the import
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_create_activity(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.create_activity.return_value = {
'activityArn': 'XXX'
}
activity = ActivityMixin()
activity.name = 'name'
self.assertEqual(activity.arn, None)
activity.create_activity()
self.assertEqual(activity.arn, 'XXX')
calls = [
mock.call.create_activity(name = 'name')
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_create_activity_exists(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
activity = ActivityMixin()
activity.arn = 'XXX'
activity.create_activity()
self.assertEqual(activity.arn, 'XXX')
calls = [
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_create_activity_exception(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
activity = ActivityMixin()
activity.arn = 'XXX'
with self.assertRaises(Exception):
activity.create_activity(exception=True)
self.assertEqual(activity.arn, 'XXX')
calls = [
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_delete_activity(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
activity = ActivityMixin()
activity.arn = 'XXX'
activity.delete_activity()
self.assertEqual(activity.arn, None)
calls = [
mock.call.delete_activity(activityArn = 'XXX')
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_delete_doesnt_exist(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
activity = ActivityMixin()
self.assertEqual(activity.arn, None)
activity.delete_activity()
self.assertEqual(activity.arn, None)
calls = [
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_delete_activity_exception(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
activity = ActivityMixin()
self.assertEqual(activity.arn, None)
with self.assertRaises(Exception):
activity.delete_activity(exception=True)
self.assertEqual(activity.arn, None)
calls = [
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_poll_task_exception(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
activity = ActivityMixin()
self.assertEqual(activity.arn, None)
with self.assertRaises(Exception):
activity.poll_task(worker = 'worker')
calls = [
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_poll_task(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.get_activity_task.return_value = {
'taskToken': 'YYY',
'input': '{}'
}
activity = ActivityMixin()
activity.arn = 'XXX'
token, input_ = activity.poll_task('worker')
self.assertEqual(token, 'YYY')
self.assertEqual(input_, {})
calls = [
mock.call.get_activity_task(activityArn = 'XXX',
workerName = 'worker')
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_poll_task_no_work(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.get_activity_task.return_value = {
'taskToken': ''
}
activity = ActivityMixin()
activity.arn = 'XXX'
token, input_ = activity.poll_task('worker')
self.assertEqual(token, None)
self.assertEqual(input_, None)
calls = [
mock.call.get_activity_task(activityArn = 'XXX',
workerName = 'worker')
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.activities.random.sample', autospec=True)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_run(self, mCreateSession, mSample):
mSample.return_value = 'XXX'
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_activities.return_value = {
'activities':[{
'name': 'name',
'activityArn': 'XXX'
}]
}
client.get_activity_task.return_value = {
'taskToken': 'YYY',
'input': '{}'
}
target = mock.MagicMock()
activity = ActivityMixin(handle_task = target)
def stop_loop(*args, **kwargs):
activity.polling = False
return mock.DEFAULT
target.side_effect = stop_loop
activity.run('name')
calls = [
mock.call.list_activities(),
mock.call.get_activity_task(activityArn = 'XXX',
workerName = 'name-XXX')
]
self.assertEqual(client.mock_calls, calls)
calls = [
mock.call('YYY', {}),
mock.call().start()
]
self.assertEqual(target.mock_calls, calls)
@mock.patch('heaviside.activities.random.sample', autospec=True)
@mock.patch('heaviside.activities.create_session', autospec=True)
def test_run_exception(self, mCreateSession, mSample):
mSample.return_value = 'XXX'
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_activities.return_value = {
'activities':[{
'name': 'name',
'activityArn': 'XXX'
}]
}
activity = ActivityMixin()
def stop_loop(*args, **kwargs):
activity.polling = False
raise BossError(None)
client.get_activity_task.side_effect = stop_loop
activity.run('name')
calls = [
mock.call.list_activities(),
mock.call.get_activity_task(activityArn = 'XXX',
workerName = 'name-XXX')
]
self.assertEqual(client.mock_calls, calls)
| python |
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-emailuser',
version='0.0.1',
packages=['emailuser'],
include_package_data=True,
license='BSD License', # example license
    description='Django email-based users.',
long_description=README,
url='https://github.com/rpkilby/django-emailuser/',
author='Ryan P Kilby',
author_email='[email protected]',
keywords="django emailuser email-based user",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| python |
"""Top-level package for explainy."""
__author__ = """Mauro Luzzatto"""
__email__ = "[email protected]"
__version__ = '0.1.14'
| python |
__version__ = "0.0.1"
__required_biorbd_min_version__ = "1.2.8"
| python |
# Import library
import sys
import rohub
import os
sys.path.insert(0, os.path.join(os.getcwd(), 'misc', 'rohub'))
import config
# Authenticate
rohub.login(username=config.username, password=config.password)
# metadata
metadata_contribution = {
'environment': 'agriculture',
'topic': 'exploration',
'filename': 'agriculture-exploration-cosmosuk',
'title': 'Cosmos-UK soil moisture',
'inputs': {'input1':{'name':"Inputs of the Jupyter Notebook - Cosmos-UK soil moisture",
'url':"https://doi.org/10.5281/zenodo.6567018"},
},
'outputs': {'content': 'table and figures',
'url': "https://doi.org/10.5281/zenodo.6566942"},
'author_GHuser': 'Environmental-DS-Book',
'references':{'ref1':{'name':'Daily and sub-daily hydrometeorological and soil data (2013-2019) [cosmos-uk]','url':"https://doi.org/10.5285/b5c190e4-e35d-40ea-8fbe-598da03a1185"},
'ref2':{'name':'Soil water content in southern england derived from a cosmic-ray soil moisture observing system – cosmos-uk','url':"https://doi.org/10.1002/hyp.10929"},
'ref3':{'name':'Cosmos: the cosmic-ray soil moisture observing system','url':"https://doi.org/10.5194/hess-16-4079-2012"}},
    'sketch': {'title':'Image showing interactive plot of Cosmos-UK soil moisture',
'path':'_temp/rohub/agriculture-exploration-cosmosuk/interactive_plotting.png'}
}
title_nb = metadata_contribution['title']
metadata_rohub = {
'title': f'{title_nb} (Jupyter Notebook) published in the Environmental Data Science book',
'research_areas': ['Environmental research', 'Soil science', 'Hydrology'],
'description': f'The research object refers to the {title_nb} notebook published in the Environmental Data Science book.',
'ros_type': 'Executable Research Object',
'ros_template': 'Executable Research Object folders structure',
}
# create
ro_title=metadata_rohub['title']
ro_research_areas=metadata_rohub['research_areas']
ro_description=metadata_rohub['description']
ro_ros_type=metadata_rohub['ros_type']
ro_ros_template=metadata_rohub['ros_template']
ro = rohub.ros_create(title=ro_title, research_areas=ro_research_areas, description=ro_description, ros_type=ro_ros_type, template=ro_ros_template)
# edition
authors=[
{"user_id": "https://github.com/acocac",
"display_name": "Alejandro Coca-Castro",
"name": "Alejandro Coca-Castro",
"affiliation": "The Alan Turing Institute"},
# {"user_id": "https://orcid.org/0000-0003-0808-3480",
# "display_name": "Raquel Carmo",
# "name": "Raquel Carmo",
# "affiliation": "European Space Agency Φ-lab"},
]
ro.set_authors(agents=authors)
reviewers=[
{"user_id": "https://github.com/dorankhamis",
"display_name": "Doran Khamis",
"name": "Doran Khamis",
"affiliation": "UK Centre for Ecology & Hydrology"},
{"user_id": "https://github.com/mattfry-ceh",
"display_name": "Matt Fry",
"name": "Matt Fry",
"affiliation": "UK Centre for Ecology & Hydrology"},
]
ro.set_contributors(agents=reviewers)
# List RO Folders
myfolders = ro.list_folders()
## sketch
rese_folder=myfolders[myfolders.path=='output']['identifier'].values
resi_res_type="Sketch"
resi_file_path=metadata_contribution['sketch']['path']
resi_title=metadata_contribution['sketch']['title']
my_res_int0=ro.add_internal_resource(res_type=resi_res_type,file_path=resi_file_path, title=resi_title, folder=rese_folder[0])
## tool
rese_folder=myfolders[myfolders.path=='tool']['identifier'].values
rese_res_type="Jupyter Notebook"
rese_file_url=f"https://github.com/{metadata_contribution['author_GHuser']}/{metadata_contribution['filename']}/blob/main/{metadata_contribution['filename']}.ipynb"
rese_title=f"Jupyter notebook"
rese_description="Jupyter Notebook hosted by the Environmental Data Science Book"
my_res_ext0=ro.add_external_resource(res_type=rese_res_type, url=rese_file_url, title=rese_title, description=rese_description, folder=rese_folder[0])
## input
rese_folder=myfolders[myfolders.path=='input']['identifier'].values
rese_res_type = "Dataset"
if len(metadata_contribution['inputs']) > 0:
for i in metadata_contribution['inputs']:
rese_file_url=metadata_contribution['inputs'][i]['url']
rese_title=f"Input {metadata_contribution['inputs'][i]['name']}"
rese_description=f"Contains input {metadata_contribution['inputs'][i]['name']} used in the Jupyter notebook of {metadata_contribution['title']}"
my_res_ext0=ro.add_external_resource(res_type=rese_res_type, url=rese_file_url, title=rese_title, description=rese_description, folder=rese_folder[0])
## output
rese_folder=myfolders[myfolders.path=='output']['identifier'].values
rese_res_type="Dataset"
rese_file_url=metadata_contribution['outputs']['url']
rese_title=f"Outputs"
rese_description=f"Contains outputs, ({metadata_contribution['outputs']['content']}), generated in the Jupyter notebook of {metadata_contribution['title']}"
my_res_ext0=ro.add_external_resource(res_type=rese_res_type, url=rese_file_url, title=rese_title, description=rese_description, folder=rese_folder[0])
## biblio
rese_folder=myfolders[myfolders.path=='biblio']['identifier'].values
rese_res_type="Bibliographic Resource"
if len(metadata_contribution['references']) > 0:
for i in metadata_contribution['references']:
rese_file_url=metadata_contribution['references'][i]['url']
rese_title=metadata_contribution['references'][i]['name']
rese_description = f"Related publication of the {metadata_contribution['topic']} presented in the Jupyter notebook"
my_res_ext0 = ro.add_external_resource(res_type=rese_res_type, url=rese_file_url, title=rese_title,
description=rese_description, folder=rese_folder[0])
# license
MIT_index = rohub.list_available_licenses().index("MIT")
ro.set_license(license_id=rohub.list_available_licenses()[MIT_index]) | python |
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
"""Very simple inter-object notification system.
This module is a brain-dead simple notification system involving a :class:`Broadcaster` and a
:class:`Listener`. A listener can only listen to one broadcaster. A broadcaster can have multiple
listeners. If the listener is connected, whenever the broadcaster calls :meth:`~Broadcaster.notify`,
the method with the same name as the broadcasted message is called on the listener.
"""
from collections import defaultdict
class Broadcaster:
"""Broadcasts messages that are received by all listeners.
"""
def __init__(self):
self.listeners = set()
def add_listener(self, listener):
self.listeners.add(listener)
def notify(self, msg):
"""Notify all connected listeners of ``msg``.
        That means that each listener will have its method with the same name as ``msg`` called.
"""
for listener in self.listeners.copy(): # listeners can change during iteration
if listener in self.listeners: # disconnected during notification
listener.dispatch(msg)
def remove_listener(self, listener):
self.listeners.discard(listener)
class Listener:
"""A listener is initialized with the broadcaster it's going to listen to. Initially, it is not connected.
"""
def __init__(self, broadcaster):
self.broadcaster = broadcaster
self._bound_notifications = defaultdict(list)
def bind_messages(self, messages, func):
"""Binds multiple message to the same function.
Often, we perform the same thing on multiple messages. Instead of having the same function
repeated again and agin in our class, we can use this method to bind multiple messages to
the same function.
"""
for message in messages:
self._bound_notifications[message].append(func)
def connect(self):
"""Connects the listener to its broadcaster.
"""
self.broadcaster.add_listener(self)
def disconnect(self):
"""Disconnects the listener from its broadcaster.
"""
self.broadcaster.remove_listener(self)
def dispatch(self, msg):
if msg in self._bound_notifications:
for func in self._bound_notifications[msg]:
func()
if hasattr(self, msg):
method = getattr(self, msg)
method()
class Repeater(Broadcaster, Listener):
REPEATED_NOTIFICATIONS = None
def __init__(self, broadcaster):
Broadcaster.__init__(self)
Listener.__init__(self, broadcaster)
def _repeat_message(self, msg):
if not self.REPEATED_NOTIFICATIONS or msg in self.REPEATED_NOTIFICATIONS:
self.notify(msg)
def dispatch(self, msg):
Listener.dispatch(self, msg)
self._repeat_message(msg)
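# Minimal usage sketch: a listener method named after the broadcast message is
# invoked when the broadcaster calls notify().
if __name__ == '__main__':
    class Document(Broadcaster):
        def save(self):
            self.notify('document_saved')
    class StatusBar(Listener):
        def document_saved(self):
            print('document saved')
    doc = Document()
    bar = StatusBar(doc)
    bar.connect()
    doc.save()  # prints 'document saved'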
| python |
import os
import pathlib
import shutil
from typing import List
from gaas.applications.image_coloring.config import \
ANIME_SKETCH_COLORIZATION_DATASET_DATASET_ID
from gaas.applications.image_coloring.dataset import \
AnimeSketchColorizationDatasetGenerator
from gaas.applications.image_coloring.utils.locations import (
get_colorgram_location, get_train_location, get_val_location)
from gaas.config import global_logger
from gaas.utils.exec_mode import get_data_root
from gaas.utils.filesys import create_dir_if_not_exist, recreate_dir
from gaas.utils.kaggle import get_extract_location
def get_data_ids(location) -> List[str]:
root = pathlib.Path(location)
files = root.glob('*')
data_ids = []
for file in files:
data_id = file.parts[-1].split('.')[0]
data_ids.append(data_id)
return data_ids
def generate_mini_dataset() -> None:
_ = AnimeSketchColorizationDatasetGenerator(type='PROD')
dev_dataset_root = get_data_root('DEV')
prod_dataset_root = get_data_root('PROD')
dev_dataset_location = get_extract_location(
dev_dataset_root, ANIME_SKETCH_COLORIZATION_DATASET_DATASET_ID)
prod_dataset_location = get_extract_location(
prod_dataset_root, ANIME_SKETCH_COLORIZATION_DATASET_DATASET_ID)
recreate_dir(dev_dataset_location)
dev_colorgram_location = get_colorgram_location(dev_dataset_location)
dev_train_location = get_train_location(dev_dataset_location)
dev_val_location = get_val_location(dev_dataset_location)
prod_colorgram_location = get_colorgram_location(prod_dataset_location)
prod_train_location = get_train_location(prod_dataset_location)
prod_val_location = get_val_location(prod_dataset_location)
create_dir_if_not_exist(dev_colorgram_location)
create_dir_if_not_exist(dev_train_location)
create_dir_if_not_exist(dev_val_location)
train_ids = get_data_ids(prod_train_location)[:10]
val_ids = get_data_ids(prod_val_location)[:10]
colorgram_ids = get_data_ids(prod_colorgram_location)[:10]
for train_id in train_ids:
shutil.copyfile(
os.path.join(prod_train_location, '{id}.png'.format(id=train_id)),
os.path.join(dev_train_location, '{id}.png'.format(id=train_id)))
for val_id in val_ids:
shutil.copyfile(
os.path.join(prod_val_location, '{id}.png'.format(id=val_id)),
os.path.join(dev_val_location, '{id}.png'.format(id=val_id)))
for colorgram_id in colorgram_ids:
shutil.copyfile(
os.path.join(prod_colorgram_location,
'{id}.json'.format(id=colorgram_id)),
os.path.join(dev_colorgram_location,
'{id}.json'.format(id=colorgram_id)))
global_logger.info('Generate mini dataset done.')
| python |
from . import extract | python |
from typing import Tuple
class DataBuffer:
"""
    Fixed-size data buffer with rollover (a ring buffer).
"""
def __init__(self, cols, size):
self.size = size
self.entries = [None for i in range(size)]
self.counter = 0
self.cols = cols
self.col_to_idx = {c: idx for idx, c in enumerate(cols)}
def add(self, values: Tuple):
self.entries[self.counter % self.size] = values
self.counter += 1
def add_dict(self, dict_data):
self.add(tuple([dict_data[col] for col in self.cols]))
def get(self, key, limit=None, limit_from_tail=False):
col_idx = self.col_to_idx[key]
if self.counter > self.size:
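            # buffer has wrapped: entries[idx:] holds the oldest rows and
            # entries[:idx] the newest, so stitch them back into insertion order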
idx = self.counter % self.size
remainder = self.size - idx
ordered_data = self.entries[-remainder:] + self.entries[:idx]
else:
ordered_data = self.entries[:self.counter]
if limit:
limit = min(limit, self.size)
if limit_from_tail:
result_list = ordered_data[-limit:]
else:
result_list = ordered_data[:limit]
else:
result_list = ordered_data
return [v[col_idx] for v in result_list]
def mean(self, key, limit=None, limit_from_tail=False):
data = list(filter(None, self.get(key, limit, limit_from_tail)))
data_len = len(data)
result = 0
if data_len > 0:
result = sum(data) / data_len
return result
def sum(self, key, limit=None, limit_from_tail=False):
data = list(filter(None, self.get(key, limit, limit_from_tail)))
data_len = len(data)
result = 0
if data_len > 0:
result = sum(data)
return result
def get_dict(self, cols=None, limit=None, limit_from_tail=False):
if cols is None:
cols = self.cols
return {col: self.get(col, limit, limit_from_tail) for col in cols}
def is_full(self):
return self.size <= self.counter
def clear(self):
self.entries = [None for i in range(self.size)]
self.counter = 0
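# Brief usage sketch of the rollover behaviour:
if __name__ == '__main__':
    buf = DataBuffer(cols=('price', 'volume'), size=3)
    buf.add_dict({'price': 1.0, 'volume': 10})
    buf.add_dict({'price': 2.0, 'volume': 20})
    buf.add_dict({'price': 3.0, 'volume': 30})
    buf.add_dict({'price': 4.0, 'volume': 40})  # rolls over, evicting the oldest row
    print(buf.get('price'))             # [2.0, 3.0, 4.0], oldest first
    print(buf.mean('volume', limit=2))  # 25.0, mean of the two oldest retained rows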
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-18 18:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0025_auto_20190217_2026'),
]
operations = [
migrations.AddField(
model_name='user',
name='allowed_inquiries',
field=models.IntegerField(default=5, verbose_name='Remaining Inquiries'),
),
]
| python |
__all__ = ["battleground", "bridge", "forest", "mountains", "store"]
| python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: fuzzy/control.py
# Fuzzy based controllers, or fuzzy inference systems
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements fuzzy controllers, or fuzzy inference systems.
There are two types of controllers implemented in this package. The Mamdani
controller is the traditional approach, where input (or controlled) variables
are fuzzified, a set of decision rules determine the outcome in a fuzzified way,
and a defuzzification method is applied to obtain the numerical result.
The Sugeno controller operates in a similar way, but there is no defuzzification
step. Instead, the value of the output (or manipulated) variable is determined
by parametric models, and the final result is determined by a weighted average
based on the decision rules. This type of controller is also known as parametric
controller.
"""
################################################################################
import numpy
from numpy import zeros, array, dot
import types
from base import *
from mf import *
from norms import *
from defuzzy import *
################################################################################
# Basic Mamdani controller
################################################################################
class Controller(object):
'''
Basic Mamdani controller
This class implements a standard Mamdani controller. A controller based on
fuzzy logic has a somewhat complex behaviour, so it is not explained here.
There are numerous references that can be consulted.
It is essential to understand the format that decision rules must follow to
obtain correct behaviour of the controller. A rule is a tuple given by::
((mx0, mx1, ..., mxn), my)
where ``mx0`` is a membership function of the first input variable, ``mx1``
is a membership function of the second input variable and so on; and ``my``
is a membership function or a fuzzy set of the output variable.
Notice that ``mx``'s are *functions* not fuzzy sets! They will be applied to
the values of the input variables given in the function call, so, if they
are anything different from a membership function, an exception will be
raised. Please, consult the examples to see how they must be used.
'''
def __init__(self, yrange, rules=[], defuzzy=Centroid,
norm=ZadehAnd, conorm=ZadehOr, negation=ZadehNot,
imply=MamdaniImplication, aglutinate=MamdaniAglutination):
'''
Creates and initialize the controller.
:Parameters:
yrange
The range of the output variable. This must be given as a set of
points belonging to the interval where the output variable is
defined, not only the start and end points. It is strongly suggested
that the interval is divided in some (eg.: 100) points equally
spaced;
rules
The set of decision rules, as defined above. If none is given, an
empty set of rules is assumed;
defuzzy
The defuzzification method to be used. If none is given, the
Centroid method is used;
norm
The norm (``and`` operation) to be used. Defaults to Zadeh and.
conorm
The conorm (``or`` operation) to be used. Defaults to Zadeh or.
negation
The negation (``not`` operation) to be used. Defaults to Zadeh not.
imply
The implication method to be used. Defaults to Mamdani implication.
aglutinate
The aglutination method to be used. Defaults to Mamdani
aglutination.
'''
self.__y = yrange
self.__rules = [ ]
if isinstance(rules, list):
for r in rules:
self.add_rule(r)
self.defuzzify = defuzzy
self.__AND__ = norm
self.__OR__ = conorm
self.__NOT__ = negation
self.__IMP__ = imply
self.__AGL__ = aglutinate
def __gety(self):
return self.__y
y = property(__gety, None)
'''Property that returns the output variable interval. Not writable'''
def __getrules(self):
return self.__rules[:]
rules = property(__getrules, None)
'''Property that returns the list of decision rules. Not writable'''
def set_norm(self, f):
'''
Sets the norm (``and``) to be used.
This method must be used to change the behavior of the ``and`` operation
of the controller.
:Parameters:
f
The function can be any function that takes two numerical values and
return one numerical value, that corresponds to the ``and`` result.
'''
if isinstance(f, numpy.vectorize):
self.__AND__ = f
elif isinstance(f, types.FunctionType):
self.__AND__ = numpy.vectorize(f)
else:
raise ValueError, 'invalid function'
def set_conorm(self, f):
'''
Sets the conorm (``or``) to be used.
This method must be used to change the behavior of the ``or`` operation
of the controller.
:Parameters:
f
The function can be any function that takes two numerical values and
return one numerical value, that corresponds to the ``or`` result.
'''
if isinstance(f, numpy.vectorize):
self.__OR__ = f
elif isinstance(f, types.FunctionType):
self.__OR__ = numpy.vectorize(f)
else:
raise ValueError, 'invalid function'
def set_negation(self, f):
'''
Sets the negation (``not``) to be used.
This method must be used to change the behavior of the ``not`` operation
of the controller.
:Parameters:
f
The function can be any function that takes one numerical value and
return one numerical value, that corresponds to the ``not`` result.
'''
if isinstance(f, numpy.vectorize):
self.__NOT__ = f
elif isinstance(f, types.FunctionType):
self.__NOT__ = numpy.vectorize(f)
else:
raise ValueError, 'invalid function'
def set_implication(self, f):
'''
Sets the implication to be used.
This method must be used to change the behavior of the implication
operation of the controller.
:Parameters:
f
The function can be any function that takes two numerical values and
return one numerical value, that corresponds to the implication
result.
'''
if isinstance(f, numpy.vectorize):
self.__IMP__ = f
elif isinstance(f, types.FunctionType):
self.__IMP__ = numpy.vectorize(f)
else:
raise ValueError, 'invalid function'
def set_aglutination(self, f):
'''
Sets the aglutination to be used.
This method must be used to change the behavior of the aglutination
operation of the controller.
:Parameters:
f
The function can be any function that takes two numerical values and
return one numerical value, that corresponds to the aglutination
result.
'''
if isinstance(f, numpy.vectorize):
self.__AGL__ = f
elif isinstance(f, types.FunctionType):
self.__AGL__ = numpy.vectorize(f)
else:
raise ValueError, 'invalid function'
def add_rule(self, rule):
'''
Adds a decision rule to the knowledge base.
It is essential to understand the format that decision rules must follow
        to obtain correct behaviour of the controller. A rule is a tuple that must
have the following format::
((mx0, mx1, ..., mxn), my)
where ``mx0`` is a membership function of the first input variable,
``mx1`` is a membership function of the second input variable and so on;
and ``my`` is a membership function or a fuzzy set of the output
variable.
Notice that ``mx``'s are *functions* not fuzzy sets! They will be
applied to the values of the input variables given in the function call,
so, if they are anything different from a membership function, an
exception will be raised when the controller is used. Please, consult
the examples to see how they must be used.
'''
mx, my = rule
for m in mx:
if not (isinstance(m, Membership) or m is None):
raise ValueError, 'condition not a membership function'
if isinstance(my, Membership):
rule = (mx, my(self.__y))
elif not isinstance(my, FuzzySet):
raise ValueError, 'consequent not a fuzzy set or membership function'
self.__rules.append(rule)
def add_table(self, lx1, lx2, table):
'''
Adds a table of decision rules in a two variable controller.
Typically, fuzzy controllers are used to control two variables. In that
case, the set of decision rules are given in the form of a table, since
that is a more compact format and very easy to visualize. This is a
        convenience function that allows decision rules to be added in the form
        of a table. Notice that the resulting knowledge base will be the same if
        this function is used or the ``add_rule`` method is used with every
        single rule. The second method is in general easier to read in a script,
        so choose whichever suits you better.
:Parameters:
lx1
The set of membership functions to the variable ``x1``, or the
lines of the table
lx2
The set of membership functions to the variable ``x2``, or the
columns of the table
table
The consequent of the rule where the condition is the line ``and``
the column. These can be the membership functions or fuzzy sets.
'''
for i in range(len(lx1)):
for j in range(len(lx2)):
my = table[i][j]
if my is not None:
self.add_rule(((lx1[i], lx2[j]), my))
def eval(self, r, xs):
'''
Evaluates one decision rule in this controller
Takes a rule from the controller and evaluates it given the values of
the input variables.
:Parameters:
r
The rule in the standard format, or an integer number. If ``r`` is
an integer, then the ``r`` th rule in the knowledge base will be
evaluated.
xs
A tuple, a list or an array containing the values of the input
variables. The dimension must be coherent with the given rule.
:Returns:
This method evaluates each membership function in the rule for each
given value, and ``and`` 's the results to obtain the condition. If
          the condition is zero, a tuple ``(0.0, None)`` is returned. Otherwise,
the condition is ``imply`` ed in the membership function of the output
variable. A tuple containing ``(condition, imply)`` (the membership
value associated to the condition and the result of the implication)
is returned.
'''
if type(r) is types.IntType:
r = self.__rules[r]
mx, my = r
# Finds the membership value for each xn
cl = [ m(x) for m, x in zip(mx, xs) if m is not None ]
# Apply the ``and`` operation
mr = reduce(lambda x0, x1: self.__AND__(x0, x1), cl)
# Implication, unnecessary if mr == 0
if mr == 0.0:
return (0.0, None)
else:
return (mr, self.__IMP__(mr, my))
def eval_all(self, *xs):
'''
Evaluates all the rules and aglutinates the results.
Given the values of the input variables, evaluate and apply every rule
in the knowledge base (with the ``eval`` method) and aglutinates the
results.
:Parameters:
xs
A tuple, a list or an array with the values of the input variables.
:Returns:
A fuzzy set containing the result of the evaluation of every rule in
the knowledge base, with the results aglutinated.
'''
ry = FuzzySet(zeros(self.__y.shape))
for r in self.__rules:
mr, iy = self.eval(r, xs)
if mr != 0.0:
ry = self.__AGL__(ry, iy)
return ry
def __call__(self, *xs):
'''
Apply the controller to the set of input variables
Given the values of the input variables, evaluates every decision rule,
aglutinates the results and defuzzify it. Returns the response of the
controller.
:Parameters:
xs
A tuple, a list or an array with the values of the input variables.
:Returns:
The response of the controller.
'''
ry = self.eval_all(*xs)
return self.defuzzify(ry, self.__y)
class Mamdani(Controller):
    '''``Mamdani`` is an alias to ``Controller``'''
pass
################################################################################
# Basic Takagi-Sugeno controller
################################################################################
class Parametric(object):
'''
Basic Parametric controller
This class implements a standard parametric (or Takagi-Sugeno) controller. A
controller based on fuzzy logic has a somewhat complex behaviour, so it is
not explained here. There are numerous references that can be consulted.
It is essential to understand the format that decision rules must follow to
obtain correct behaviour of the controller. A rule is a tuple given by::
((mx0, mx1, ..., mxn), (a0, a1, ..., an))
where ``mx0`` is a membership function of the first input variable, ``mx1``
is a membership function of the second input variable and so on; and ``a0``
is the linear parameter, ``a1`` is the parameter associated with the first
input variable, ``a2`` is the parameter associated with the second input
variable and so on. The response to the rule is calculated by::
y = a0 + a1*x1 + a2*x2 + ... + an*xn
Notice that ``mx``'s are *functions* not fuzzy sets! They will be applied to
the values of the input variables given in the function call, so, if they
are anything different from a membership function, an exception will be
raised. Please, consult the examples to see how they must be used.
'''
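    # Worked numeric sketch of a rule's consequent (hypothetical values): with
    # parameters a = (1.0, 0.5, -0.2) and inputs xs = (1.0, 2.0, 3.0), ``eval``
    # computes dot(a, xs) = 1.0*1.0 + 0.5*2.0 - 0.2*3.0 = 1.4, which ``__call__``
    # then weights by the ``and`` of the rule's membership values.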
def __init__(self, rules = [], norm=ProbabilisticAnd,
conorm=ProbabilisticOr, negation=ProbabilisticNot):
'''
Creates and initializes the controller.
:Parameters:
rules
List containing the decision rules for the controller. If not given,
an empty set of decision rules is used.
norm
The norm (``and`` operation) to be used. Defaults to Probabilistic
and.
conorm
The conorm (``or`` operation) to be used. Defaults to Probabilistic
or.
negation
The negation (``not`` operation) to be used. Defaults to
Probabilistic not.
'''
self.__rules = [ ]
if isinstance(rules, list):
for r in rules:
self.add_rules(r)
self.__AND__ = norm
self.__OR__ = conorm
self.__NOT__ = negation
def __getrules(self):
return self.__rules[:]
rules = property(__getrules, None)
'''Property that returns the list of decision rules. Not writable'''
def add_rule(self, rule):
'''
Adds a decision rule to the knowledge base.
It is essential to understand the format that decision rules must follow
to obtain correct behaviour of the controller. A rule is a tuple given
by::
((mx0, mx1, ..., mxn), (a0, a1, ..., an))
where ``mx0`` is a membership function of the first input variable,
``mx1`` is a membership function of the second input variable and so on;
and ``a0`` is the linear parameter, ``a1`` is the parameter associated
with the first input variable, ``a2`` is the parameter associated with
the second input variable and so on.
Notice that ``mx``'s are *functions* not fuzzy sets! They will be
applied to the values of the input variables given in the function call,
so, if they are anything different from a membership function, an
exception will be raised. Please, consult the examples to see how they
must be used.
'''
mx, a = rule
for m in mx:
if not (isinstance(m, Membership) or m is None):
raise ValueError, 'condition not a membership function'
a = array(a, dtype=float)
rule = (mx, a)
self.__rules.append(rule)
def eval(self, r, xs):
'''
Evaluates one decision rule in this controller
Takes a rule from the controller and evaluates it given the values of
the input variables. The format of the rule is as given, and the
response to the rule is calculated by::
y = a0 + a1*x1 + a2*x2 + ... + an*xn
:Parameters:
r
The rule in the standard format, or an integer number. If ``r`` is
an integer, then the ``r`` th rule in the knowledge base will be
evaluated.
xs
A tuple, a list or an array containing the values of the input
variables. The dimension must be coherent with the given rule.
:Returns:
This method evaluates each membership function in the rule for each
given value, and ``and`` 's the results to obtain the condition. If
          the condition is zero, a tuple ``(0.0, 0.0)`` is returned. Otherwise,
          the result as given above is calculated, and a tuple containing
``(condition, result)`` (the membership value associated to the
condition and the result of the calculation) is returned.
'''
if type(r) is types.IntType:
r = self.__rules[r]
mx, a = r
# Finds the membership value for each xn
cl = [ m(x) for m, x in zip(mx, xs) if m is not None ]
# Apply ``and`` operation
mr = reduce(lambda x0, x1: self.__AND__(x0, x1), cl)
# Implication, returns 0.0 if mr == 0
if mr > 0.0:
return (mr, dot(a, xs))
else:
return (0.0, 0.0)
def __call__(self, *xs):
'''
Apply the controller to the set of input variables
Given the values of the input variables, evaluates every decision rule,
and calculates the weighted average of the results. Returns the response
of the controller.
:Parameters:
xs
A tuple, a list or an array with the values of the input variables.
:Returns:
The response of the controller.
'''
ys = array([ self.eval(r, xs) for r in self.__rules ])
m = ys[:, 0]
y = ys[:, 1]
return sum(m*y) / sum(m)
class Sugeno(Parametric):
'''``Sugeno`` is an alias to ``Parametric``'''
pass
################################################################################
# Test
if __name__ == "__main__":
pass
| python |
import this # => display the Zen of Py
# 1. Any python file can be imported as module
# to load from another module:
import sys
sys.path += ["path_to_folder"] # and import MyModule
if __name__ == "__main__":
    pass # this code only runs when the script is executed directly; if loaded as a module, it will not run
# PACKAGES
# MathOps - one must define main dir for package
# __init__.py - must have __init__.py, it runs at dir loading
# Simple
# __init__.py
# Arithmetic.py
# Bits.py
# One can access as import MathOps.Simple or import MathOps.Bits etc.
# for from MathOps import * to work, one must define __all__ = ["PyFileName1", ..] (dir relative)
# can dynamically import a module with m = importlib.import_module("name")
# with exec(string) one can execute dynamic code
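# Example of both, using only the standard library:
import importlib
math_module = importlib.import_module("math")  # dynamic import by module name
print(math_module.sqrt(16))                    # 4.0
exec("x = 2 + 2")                              # dynamic code execution at module scope
print(x)                                       # 4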
| python |
import math
import itertools
import functools
import multiprocessing
import asyncio
import uuid
import numpy as np
import pymatgen as pmg
import lammps
from lammps.potential import (
write_table_pair_potential,
write_tersoff_potential,
write_stillinger_weber_potential,
write_gao_weber_potential,
write_vashishta_potential,
write_comb_potential,
write_comb_3_potential
)
from ..potential import Potential
from .base import DFTFITCalculator, MDCalculator, MDReader
class LammpsCythonWorker:
"""A lammps cython worker
All input and output is fully serializable.
"""
def __init__(self, structures, elements, potential_schema, unique_id=1):
self.structures = structures
self.elements = elements
self.potential = Potential(potential_schema)
self.unique_id = unique_id
self.lammps_systems = []
def _initialize_lammps(self, structure):
lmp = lammps.Lammps(units='metal', style='full', args=[
'-log', 'none', '-screen', 'none'
])
lmp.system.add_pymatgen_structure(structure, self.elements)
lmp.thermo.add('my_ke', 'ke', 'all')
return lmp
def create(self):
for structure in self.structures:
self.lammps_systems.append(self._initialize_lammps(structure))
def _apply_potential(self, potential):
lammps_commands = write_potential(potential, elements=self.elements, unique_id=self.unique_id)
for command in lammps_commands:
for lmp in self.lammps_systems:
lmp.command(command)
def worker_multiprocessing_loop(self, pipe):
while True:
message = pipe.recv()
if isinstance(message, str) and message == 'quit':
break
results = self.compute(message)
pipe.send(results)
pipe.close()
def compute(self, parameters):
self.potential.optimization_parameters = parameters
self._apply_potential(self.potential)
results = []
for lmp in self.lammps_systems:
lmp.run(0)
S = lmp.thermo.computes['thermo_press'].vector
results.append({
'forces': lmp.system.forces.copy(),
'energy': lmp.thermo.computes['thermo_pe'].scalar + lmp.thermo.computes['my_ke'].scalar,
'stress': np.array([
[S[0], S[3], S[5]],
[S[3], S[1], S[4]],
[S[5], S[4], S[2]]
])
})
return results
class LammpsCythonDFTFITCalculator(DFTFITCalculator):
"""This is not a general purpose lammps calculator. Only for dftfit
    evaluations. For now there are no plans to generalize it.
"""
def __init__(self, structures, potential, num_workers=1):
self.unique_id = str(uuid.uuid1())
self.structures = structures
# ensure element indexes are the same between all lammps calculations
self.elements = set()
for structure in self.structures:
self.elements = self.elements | set(structure.species)
self.elements = list(self.elements)
self.workers = []
potential_schema = potential.as_dict()
if num_workers == 1:
self.workers.append(LammpsCythonWorker(structures, self.elements, potential_schema, self.unique_id))
else:
def create_worker(structures, elements, potential_schema, pipe):
worker = LammpsCythonWorker(structures, elements, potential_schema, self.unique_id)
worker.create()
worker.worker_multiprocessing_loop(pipe)
self.workers = []
structure_index = 0
rem = len(structures) % num_workers
n = math.floor(len(structures) / num_workers)
for i in range(num_workers):
p_conn, c_conn = multiprocessing.Pipe()
                # hand out the remainder: the first `rem` workers get one extra structure
                if i < rem:
subset_structures = structures[structure_index: structure_index+n+1]
structure_index += n + 1
else:
subset_structures = structures[structure_index: structure_index+n]
structure_index += n
p = multiprocessing.Process(target=create_worker, args=(subset_structures, self.elements, potential_schema, c_conn))
p.start()
self.workers.append((p, p_conn))
async def create(self):
        # otherwise a separate process calls this method
if len(self.workers) == 1:
self.workers[0].create()
def _apply_potential_files(self, potential):
lammps_files = write_potential_files(potential, elements=self.elements, unique_id=self.unique_id)
for filename, content in lammps_files.items():
with open(filename, 'w') as f:
f.write(content)
async def submit(self, potential, properties=None):
properties = properties or {'stress', 'energy', 'forces'}
parameters = potential.optimization_parameters
self._apply_potential_files(potential)
results = []
if len(self.workers) == 1:
results = self.workers[0].compute(parameters)
else:
# send potential to each worker
for p, p_conn in self.workers:
p_conn.send(parameters)
# recv calculation results from each worker
for p, p_conn in self.workers:
results.extend(p_conn.recv())
md_readers = []
for structure, result in zip(self.structures, results):
md_readers.append(MDReader(energy=result['energy'], forces=result['forces'], stress=result['stress'], structure=structure))
return md_readers
def shutdown(self):
# nothing is needed if not using multiprocessing module
if len(self.workers) > 1:
for p, p_conn in self.workers:
p_conn.send('quit')
p.join()
class LammpsCythonMDCalculator(MDCalculator):
def __init__(self, num_workers=1):
self.unique_id = str(uuid.uuid1())
if num_workers != 1:
raise NotImplementedError('lammps-cython md calculator can only run with one worker')
async def create(self):
pass
async def submit(self, structure, potential, properties=None, lammps_additional_commands=None):
properties = properties or {'stress', 'energy', 'forces'}
results = {}
lammps_additional_commands = lammps_additional_commands or ['run 0']
lmp = lammps.Lammps(units='metal', style='full', args=[
'-log', 'none', '-screen', 'none'
])
elements, rotation_matrix = lmp.system.add_pymatgen_structure(structure)
inv_rotation_matrix = np.linalg.inv(rotation_matrix)
lmp.thermo.add('my_ke', 'ke', 'all')
if 'initial_positions' in properties:
results['initial_positions'] = np.dot(lmp.system.positions.copy(), inv_rotation_matrix)
lammps_files = write_potential_files(potential, elements=elements, unique_id=self.unique_id)
for filename, content in lammps_files.items():
with open(filename, 'w') as f:
f.write(content)
lammps_commands = write_potential(potential, elements=elements, unique_id=self.unique_id)
for command in lammps_commands:
lmp.command(command)
for command in lammps_additional_commands:
lmp.command(command)
# to handle non-orthogonal unit cells
if 'lattice' in properties:
lengths, angles_r = lmp.box.lengths_angles
angles = [math.degrees(_) for _ in angles_r]
results['lattice'] = pmg.Lattice.from_parameters(*lengths, *angles).matrix
if 'positions' in properties:
results['positions'] = np.dot(lmp.system.positions.copy(), inv_rotation_matrix)
if 'stress' in properties:
S = lmp.thermo.computes['thermo_press'].vector
results['stress'] = np.array([
[S[0], S[3], S[5]],
[S[3], S[1], S[4]],
[S[5], S[4], S[2]]
])
if 'energy' in properties:
results['energy'] = lmp.thermo.computes['thermo_pe'].scalar + lmp.thermo.computes['my_ke'].scalar
if 'forces' in properties:
results['forces'] = lmp.system.forces.copy()
if 'symbols' in properties:
results['symbols'] = [elements[i-1] for i in lmp.system.types[0]]
if 'velocities' in properties:
results['velocities'] = np.dot(lmp.system.velocities.copy(), inv_rotation_matrix)
if 'timesteps' in properties:
results['timesteps'] = lmp.time_step
# compatibility...
future = asyncio.Future()
future.set_result({'results': results})
return future
def vashishta_mixed_to_vashishta(element_parameters, override_parameters):
""" Vashishta mixing potential
Using tersoff for two body mixing rules.
"""
def mixing_params_from_singles(e1, e2):
p1 = [float(_) for _ in element_parameters[e1]]
p2 = [float(_) for _ in element_parameters[e2]]
        # 13 inputs: 14 parameters
        # H (*), eta (1), Zi (1), Zj (1), lambda1 (+), D (*), lambda4 (+), W (*)
        # cutoff: rc (1), r0 (1)
# B (1), gamma (1), C (1), costheta0 (1)
return [
math.sqrt(p1[0] * p2[0]), # H
p1[1], # eta
p1[2], # Zi
p2[2], # Zj
(p1[3] + p2[3]) / 2.0, # lambda 1
math.sqrt(p1[4] * p2[4]), # D
(p1[5] + p2[5]) / 2.0, # lambda4
math.sqrt(p1[6] * p2[6]), # W
p1[7], # r_cutoff (2)
p1[8], # B
p1[9], # gamma
p1[10], # r_0 (3)
p1[11], # C
p1[12], # costheta0
]
parameters = {}
for e1, e2, e3 in itertools.product(element_parameters, repeat=3):
mixing_parameters = mixing_params_from_singles(e1, e2)
if (e1, e2, e3) in override_parameters:
            parameters[(e1, e2, e3)] = [float(p2) if p2 else p1 for p1, p2 in zip(mixing_parameters, override_parameters[(e1, e2, e3)])]
else:
parameters[(e1, e2, e3)] = mixing_parameters
return parameters
def tersoff_2_to_tersoff(element_parameters, mixing_parameters):
def mixing_params_from_singles(e1, e2, mixing_value):
p1 = [float(_) for _ in element_parameters[e1]]
p2 = [float(_) for _ in element_parameters[e2]]
mixing = float(mixing_value)
return [
3.0, # m
1.0, # gamma
0.0, # lambda3
p1[0], # c
p1[1], # d
p1[2], # costheta0
p1[3], # n
p1[4], # beta
(p1[5] + p2[5]) / 2, # lambda2
mixing * math.sqrt(p1[6] * p2[6]), # B
math.sqrt(p1[7] * p2[7]), # R
math.sqrt(p1[8] * p2[8]), # D
(p1[9] + p2[9]) / 2, # lambda1
math.sqrt(p1[10] * p2[10]), # A
]
parameters = {}
for e1, e2, e3 in itertools.product(element_parameters, repeat=3):
if e1 == e2:
mixing_value = 1.0
else:
sorted_e1_e2 = tuple(sorted([e1, e2]))
mixing_value = mixing_parameters.get(sorted_e1_e2)
if mixing_value is None:
continue
parameters[(e1, e2, e3)] = mixing_params_from_singles(e1, e2, mixing_value)
return parameters
LAMMPS_POTENTIAL_NAME_MAPPING = {
'lennard-jones': 'lj/cut',
'beck': 'beck',
'zbl': 'zbl',
'buckingham': 'buck',
'tersoff-2': 'tersoff',
'tersoff': 'tersoff',
'stillinger-weber': 'sw',
'gao-weber': 'gw',
'vashishta': 'vashishta',
'vashishta-mixing': 'vashishta',
'comb': 'comb',
'comb-3': 'comb3',
'python-function': 'table'
}
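# Illustrative sketch (hypothetical schema values): a potential whose spec contains
#   {'pair': [{'type': 'buckingham',
#              'parameters': [{'elements': ['Mg', 'O'],
#                              'coefficients': [1279.69, 0.2997, 0.0]}]}]}
# is rendered by write_potential() below into commands along the lines of
#   pair_style buck 10.0
#   pair_coeff 1 2 1279.69 0.2997 0.0
# where the element indices follow the order of the ``elements`` list passed in.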
def write_potential_files(potential, elements, unique_id=1):
"""Generate lammps files required by specified potential
Parameters
----------
potential: dftfit.potential.Potential
schema representation of potential
elements: list
list specifying the index of each element
unique_id: str
        an id that can be used for files to guarantee uniqueness
"""
spec = potential.schema['spec']
lammps_files = {}
for i, pair_potential in enumerate(spec.get('pair', [])):
potential_lammps_name = LAMMPS_POTENTIAL_NAME_MAPPING.get(pair_potential['type'])
if pair_potential['type'] == 'tersoff-2':
element_parameters = {}
mixing_parameters = {}
for parameter in pair_potential['parameters']:
if len(parameter['elements']) == 1:
element_parameters[parameter['elements'][0]] = parameter['coefficients']
elif len(parameter['elements']) == 2:
mixing_parameters[tuple(sorted(parameter['elements']))] = parameter['coefficients'][0]
parameters = tersoff_2_to_tersoff(element_parameters, mixing_parameters)
elif pair_potential['type'] == 'vashishta-mixing':
element_parameters = {}
override_parameters = {}
for parameter in pair_potential['parameters']:
if len(parameter['elements']) == 1:
element_parameters[parameter['elements'][0]] = parameter['coefficients']
elif len(parameter['elements']) == 3:
override_parameters[tuple(parameter['elements'])] = parameter['coefficients']
parameters = vashishta_mixed_to_vashishta(element_parameters, override_parameters)
elif pair_potential['type'] in {'tersoff-2', 'tersoff', 'stillinger-weber', 'gao-weber', 'vashishta', 'comb', 'comb-3', 'python-function'}:
parameters = {}
for parameter in pair_potential['parameters']:
parameters[tuple(parameter['elements'])] = [float(_) for _ in parameter['coefficients']]
filename = '/tmp/lammps.%d.%s.%s' % (i, unique_id, potential_lammps_name)
if pair_potential['type'] in {'tersoff-2', 'tersoff'}:
lammps_files[filename] = write_tersoff_potential(parameters)
elif pair_potential['type'] == 'stillinger-weber':
lammps_files[filename] = write_stillinger_weber_potential(parameters)
elif pair_potential['type'] == 'gao-weber':
lammps_files[filename] = write_gao_weber_potential(parameters)
elif pair_potential['type'] in {'vashishta', 'vashishta-mixing'}:
lammps_files[filename] = write_vashishta_potential(parameters)
elif pair_potential['type'] == 'comb':
lammps_files[filename] = write_comb_potential(parameters)
elif pair_potential['type'] == 'comb-3':
lammps_files[filename] = write_comb_3_potential(parameters)
elif pair_potential['type'] == 'python-function':
cutoff = [float(_) for _ in pair_potential.get('cutoff', [1.0, 10.0])]
samples = int(pair_potential.get('samples', 1000))
def get_function(func_str):
d = {}
exec(func_str, d)
return d['potential']
potential_func = get_function(pair_potential['function'])
            for (e1, e2), pair_parameters in parameters.items():
                float_parameters = [float(_) for _ in pair_parameters]
                f_r = functools.partial(potential_func, *float_parameters)
filename = '/tmp/lammps.%s.%s.%d.%s.%s' % (e1, e2, i, unique_id, potential_lammps_name)
lammps_files[filename] = write_table_pair_potential(f_r, samples=samples, bounds=cutoff)
return lammps_files
def write_potential(potential, elements, unique_id=1):
"""Generate lammps commands required by specified potential
Parameters
----------
potential: dftfit.potential.Potential
schema representation of potential
elements: list
list specifying the index of each element
unique_id: str
        an id that can be used for files to guarantee uniqueness
Supported Potentials:
- two-body
- lennard-jones
- buckingham
- three-body
- tersoff-2, tersoff
- stillinger-weber
- gao-weber
- n-body
        - coulomb charge (long + short range)
"""
spec = potential.schema['spec']
element_map = {e.symbol: i for i, e in enumerate(elements, start=1)}
# collect potentials in spec
potentials = []
lammps_commands = []
if ('charge' in spec) and ('kspace' in spec):
lammps_commands.append('kspace_style %s %f' % (spec['kspace']['type'], spec['kspace']['tollerance']))
for element, charge in spec['charge'].items():
lammps_commands.append('set type %d charge %f' % (element_map[element], float(charge)))
potentials.append(({
'pair_style': 'coul/long %f' % 10.0,
'pair_coeff': [('* *', 'coul/long', '')]
}))
for i, pair_potential in enumerate(spec.get('pair', [])):
potential_lammps_name = LAMMPS_POTENTIAL_NAME_MAPPING.get(pair_potential['type'])
if pair_potential['type'] in {'lennard-jones', 'beck', 'buckingham', 'zbl'}:
pair_coeffs = []
for parameter in pair_potential['parameters']:
e1, e2 = parameter['elements']
ij = ' '.join([str(_) for _ in sorted([element_map[e1], element_map[e2]])])
coefficients_str = ' '.join([str(float(coeff)) for coeff in parameter['coefficients']])
pair_coeffs.append((ij, potential_lammps_name, coefficients_str))
if pair_potential['type'] == 'zbl':
cutoff = pair_potential.get('cutoff', [3.0, 4.0])
pair_style = '%s %f %f' % (potential_lammps_name, cutoff[0], cutoff[1])
else:
pair_style = '%s %f' % (potential_lammps_name, pair_potential.get('cutoff', [10.0])[-1])
potentials.append({
'pair_style': pair_style,
'pair_coeff': pair_coeffs
})
elif pair_potential['type'] == 'python-function':
pair_coeffs = []
for parameter in pair_potential['parameters']:
e1, e2 = parameter['elements']
ij = ' '.join([str(_) for _ in sorted([element_map[e1], element_map[e2]])])
filename = '/tmp/lammps.%s.%s.%d.%s.%s' % (e1, e2, i, unique_id, potential_lammps_name)
pair_coeffs.append((ij, potential_lammps_name, '%s PAIR' % filename))
samples = pair_potential.get('samples', 1000)
potentials.append({
'pair_style': 'table linear %d' % samples,
'pair_coeff': pair_coeffs
})
elif pair_potential['type'] in {'tersoff-2', 'tersoff', 'stillinger-weber', 'gao-weber', 'vashishta', 'vashishta-mixing', 'comb', 'comb-3'}:
filename = '/tmp/lammps.%d.%s.%s' % (i, unique_id, potential_lammps_name)
if pair_potential['type'] == 'comb-3':
pair_style = '%s polar_off' % (potential_lammps_name)
else:
pair_style = potential_lammps_name
potentials.append({
'pair_style': pair_style,
'pair_coeff': [('* *', potential_lammps_name, '%s %s' % (
filename, ' '.join(str(e) for e in elements)))],
})
else:
raise ValueError('pair potential %s not implemented yet!' % (pair_potential['type']))
if len(potentials) == 1: # no need for hybrid/overlay
potential = potentials[0]
lammps_commands.append('pair_style %s' % potential['pair_style'])
for pair_coeff in potential['pair_coeff']:
lammps_commands.append('pair_coeff ' + pair_coeff[0] + ' ' + pair_coeff[2])
elif len(potentials) > 1: # use hybrid/overlay to join all potentials
lammps_commands.append('pair_style hybrid/overlay ' + ' '.join(potential['pair_style'] for potential in potentials))
for potential in potentials:
for pair_coeff in potential.get('pair_coeff', []):
lammps_commands.append('pair_coeff ' + ' '.join(pair_coeff))
return lammps_commands
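# Illustrative sketch only (not part of dftfit): a rough demonstration of how
# ``write_potential`` turns a schema spec into LAMMPS commands. ``_DemoElement``
# and ``_DemoPotential`` are hypothetical stand-ins for the element and
# dftfit.potential.Potential objects; only the attributes that
# ``write_potential`` actually reads (``symbol`` and ``schema``) are provided.
if __name__ == '__main__':
    from collections import namedtuple
    _DemoElement = namedtuple('_DemoElement', ['symbol'])

    class _DemoPotential:
        # note: the 'tollerance' key spelling follows the lookup in
        # write_potential above
        schema = {'spec': {
            'charge': {'Mg': 2.0, 'O': -2.0},
            'kspace': {'type': 'pppm', 'tollerance': 1e-5},
        }}

    demo_elements = [_DemoElement('Mg'), _DemoElement('O')]
    for command in write_potential(_DemoPotential(), demo_elements, unique_id=1):
        print(command)
    # Roughly expected output (formatting comes from write_potential):
    #   kspace_style pppm 0.000010
    #   set type 1 charge 2.000000
    #   set type 2 charge -2.000000
    #   pair_style coul/long 10.000000
    #   pair_coeff * *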
| python |
# Generated by Django 2.0.5 on 2018-09-11 16:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("barriers", "0007_auto_20180905_1553")]
operations = [
migrations.RemoveField(model_name="barrierstatus", name="barrier"),
migrations.RemoveField(model_name="barrierstatus", name="created_by"),
migrations.DeleteModel(name="BarrierStatus"),
]
| python |
### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXUSERMETA(gxapi_cy.WrapUSERMETA):
"""
GXUSERMETA class.
The `GXUSERMETA <geosoft.gxapi.GXUSERMETA>` class handles user style metadata tied to real
data.
"""
def __init__(self, handle=0):
super(GXUSERMETA, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXUSERMETA <geosoft.gxapi.GXUSERMETA>`
:returns: A null `GXUSERMETA <geosoft.gxapi.GXUSERMETA>`
:rtype: GXUSERMETA
"""
return GXUSERMETA()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
@classmethod
def create(cls, format):
"""
Creates an empty `GXUSERMETA <geosoft.gxapi.GXUSERMETA>` object
:param format: :ref:`USERMETA_FORMAT` Type of Meta to create
:type format: int
:returns: `GXUSERMETA <geosoft.gxapi.GXUSERMETA>` Object
:rtype: GXUSERMETA
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapUSERMETA._create(GXContext._get_tls_geo(), format)
return GXUSERMETA(ret_val)
@classmethod
def create_s(cls, file):
"""
Create a `GXUSERMETA <geosoft.gxapi.GXUSERMETA>` from a file
:param file: File Name
:type file: str
:returns: `GXUSERMETA <geosoft.gxapi.GXUSERMETA>` Object
:rtype: GXUSERMETA
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapUSERMETA._create_s(GXContext._get_tls_geo(), file.encode())
return GXUSERMETA(ret_val)
def get_data_creation_date(self, date):
"""
Get the Data Creation Date
:param date: Date
:type date: float_ref
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
date.value = self._get_data_creation_date(date.value)
def get_extents2d(self, min_x, min_y, max_x, max_y):
"""
Get the 2d Extents
:param min_x: MinX
:param min_y: MinY
:param max_x: MaxX
:param max_y: MaxY
:type min_x: float_ref
:type min_y: float_ref
:type max_x: float_ref
:type max_y: float_ref
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
min_x.value, min_y.value, max_x.value, max_y.value = self._get_extents2d(min_x.value, min_y.value, max_x.value, max_y.value)
def get_extents3d(self, min_x, min_y, min_z, max_x, max_y, max_z):
"""
Get the 3d Extents
:param min_x: MinX
:param min_y: MinY
:param min_z: MinZ
:param max_x: MaxX
:param max_y: MaxY
:param max_z: MaxZ
:type min_x: float_ref
:type min_y: float_ref
:type min_z: float_ref
:type max_x: float_ref
:type max_y: float_ref
:type max_z: float_ref
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
min_x.value, min_y.value, min_z.value, max_x.value, max_y.value, max_z.value = self._get_extents3d(min_x.value, min_y.value, min_z.value, max_x.value, max_y.value, max_z.value)
def get_ipj(self, ipj):
"""
Get the `GXIPJ <geosoft.gxapi.GXIPJ>`
        :param ipj: `GXIPJ <geosoft.gxapi.GXIPJ>` object
:type ipj: GXIPJ
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._get_ipj(ipj)
def get_meta_creation_date(self, date):
"""
Get the Meta Creation Date
:param date: Date
:type date: float_ref
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
date.value = self._get_meta_creation_date(date.value)
def get_xml_format(self, format):
"""
Get the XML Format
:param format: :ref:`USERMETA_FORMAT`
:type format: int_ref
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
format.value = self._get_xml_format(format.value)
def set_xml_format(self, format):
"""
        Set the XML Format
:param format: :ref:`USERMETA_FORMAT`
:type format: int
.. versionadded:: 9.6
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_xml_format(format)
def compare(self, usermeta2):
"""
Compare 2 `GXUSERMETA <geosoft.gxapi.GXUSERMETA>`'s
        :param usermeta2: Second USERMETA
:type usermeta2: GXUSERMETA
:returns: 0 - No
1 - Yes
:rtype: int
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._compare(usermeta2)
return ret_val
def get_data_creator(self, data_creator):
"""
Get the Data Creator
:param data_creator: DataCreator returned
:type data_creator: str_ref
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
data_creator.value = self._get_data_creator(data_creator.value.encode())
def get_format(self, format):
"""
Get the File Format
:param format: Title returned
:type format: str_ref
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
format.value = self._get_format(format.value.encode())
def get_meta_creator(self, meta_creator):
"""
Get the Meta Creator
:param meta_creator: MetaCreator returned
:type meta_creator: str_ref
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
meta_creator.value = self._get_meta_creator(meta_creator.value.encode())
def get_project(self, project):
"""
Get the File Project
:param project: Title returned
:type project: str_ref
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
project.value = self._get_project(project.value.encode())
def get_title(self, title):
"""
Get the Title
:param title: Title returned
:type title: str_ref
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
title.value = self._get_title(title.value.encode())
def serial(self, save_geo, file):
"""
Serialize `GXUSERMETA <geosoft.gxapi.GXUSERMETA>` to a `GXBF <geosoft.gxapi.GXBF>`.
:param save_geo: Output Geosoft Metadata?
:param file: File name to save to
:type save_geo: bool
:type file: str
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._serial(save_geo, file.encode())
def set_data_creation_date(self, date):
"""
Set the Data Creation Date
:param date: Date
:type date: float
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_data_creation_date(date)
def set_data_creator(self, data_creator):
"""
Set the Data Creator
:param data_creator: DataCreator
:type data_creator: str
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_data_creator(data_creator.encode())
def set_extents2d(self, min_x, min_y, max_x, max_y):
"""
Set the 2d Extents
:param min_x: MinX
:param min_y: MinY
:param max_x: MaxX
:param max_y: MaxY
:type min_x: float
:type min_y: float
:type max_x: float
:type max_y: float
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_extents2d(min_x, min_y, max_x, max_y)
def set_extents3d(self, min_x, min_y, min_z, max_x, max_y, max_z):
"""
Set the 3d Extents
:param min_x: MinX
:param min_y: MinY
:param min_z: MinZ
:param max_x: MaxX
:param max_y: MaxY
:param max_z: MaxZ
:type min_x: float
:type min_y: float
:type min_z: float
:type max_x: float
:type max_y: float
:type max_z: float
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_extents3d(min_x, min_y, min_z, max_x, max_y, max_z)
def set_format(self, format):
"""
Set the File Format
:param format: Format
:type format: str
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_format(format.encode())
def set_ipj(self, ipj):
"""
Set the `GXIPJ <geosoft.gxapi.GXIPJ>`
        :param ipj: `GXIPJ <geosoft.gxapi.GXIPJ>` object
:type ipj: GXIPJ
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_ipj(ipj)
def set_meta_creation_date(self, date):
"""
Set the Meta Creation Date
:param date: Date
:type date: float
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_meta_creation_date(date)
def set_meta_creator(self, meta_creator):
"""
Set the Meta Creator
:param meta_creator: MetaCreator
:type meta_creator: str
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_meta_creator(meta_creator.encode())
def set_project(self, project):
"""
Set the File Project
:param project: Project
:type project: str
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_project(project.encode())
def set_title(self, title):
"""
Set the Title
:param title: Title
:type title: str
.. versionadded:: 7.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_title(title.encode())
@classmethod
def update_extents_2d(cls, filename, ipj, min_x, min_y, max_x, max_y):
"""
Edit an existing XML metadata file by
changing the extents and projection data
:param filename: Filename of existing metadata to update
:param ipj: New projection
:param min_x: New MinX value
:param min_y: New MinY value
:param max_x: New MaxX value
:param max_y: New MaxY value
:type filename: str
:type ipj: GXIPJ
:type min_x: float
:type min_y: float
:type max_x: float
:type max_y: float
.. versionadded:: 7.0.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
gxapi_cy.WrapUSERMETA._update_extents_2d(GXContext._get_tls_geo(), filename.encode(), ipj, min_x, min_y, max_x, max_y)
@classmethod
def update_file_type(cls, file_name, new_file_type):
"""
Edit an existing XML metadata file by
changing the file type
:param file_name: Filename of existing metadata to update
:param new_file_type: New file type
:type file_name: str
:type new_file_type: str
.. versionadded:: 7.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
gxapi_cy.WrapUSERMETA._update_file_type(GXContext._get_tls_geo(), file_name.encode(), new_file_type.encode())
@classmethod
def save_file_lineage(cls, file_name, save_geo):
"""
Add lineage to XML
:param file_name: Filename of existing metadata to update
:param save_geo: Output Geosoft Metadata?
:type file_name: str
:type save_geo: bool
.. versionadded:: 8.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
gxapi_cy.WrapUSERMETA._save_file_lineage(GXContext._get_tls_geo(), file_name.encode(), save_geo)
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
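# Illustrative sketch (not generated code): a rough idea of how the accessors
# above are typically used. It assumes an open GX context and that ``FMT``
# holds a valid :ref:`USERMETA_FORMAT` constant -- both are hypothetical here,
# so the snippet is left commented out rather than executable.
#
#     meta = GXUSERMETA.create(FMT)
#     meta.set_title("Survey metadata")
#     title = str_ref()
#     meta.get_title(title)
#     print(title.value)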
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer | python |
from contextlib import nullcontext as expectation_of_no_exceptions_raised
import pytest
from _config.combo.cookie_banner import JYLLANDSPOSTEN_ACCEPT_COOKIES
from _config.combo.log_in import (AMAZON_LOGIN_CREDENTIALS, AMAZON_LOGIN_FORM, JYLLANDSPOSTEN_LOGIN_CREDENTIALS,
JYLLANDSPOSTEN_LOGIN_FORM)
from _mock_data.url import external_url
from browserist import Browser, CookieBannerSettings, LoginCredentials, LoginForm1Step, LoginForm2Steps
@pytest.mark.parametrize("url, login_credentials, login_form, cookie_banner_settings", [
(external_url.AMAZON_COM, AMAZON_LOGIN_CREDENTIALS, AMAZON_LOGIN_FORM, None),
(external_url.JYLLANDSPOSTEN_DK, JYLLANDSPOSTEN_LOGIN_CREDENTIALS,
JYLLANDSPOSTEN_LOGIN_FORM, JYLLANDSPOSTEN_ACCEPT_COOKIES),
])
def test_combo_login_with_1_and_2_steps(
url: str,
login_credentials: LoginCredentials,
login_form: LoginForm1Step | LoginForm2Steps,
cookie_banner_settings: CookieBannerSettings | None,
browser_default_disable_images: Browser
) -> None:
with expectation_of_no_exceptions_raised():
browser = browser_default_disable_images
browser.open.url(url)
if cookie_banner_settings:
browser.combo.cookie_banner(cookie_banner_settings)
browser.combo.log_in(login_credentials, login_form)
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import copy
import solutions.utils as utils
class CoProcessor:
def __init__(self, debug=True):
self._registers = {c: 0 for c in 'abcdefgh'}
self._pc = 0
self._mul_counter = 0
self._debug = debug
self._states = set()
if not self._debug:
self._registers['a'] = 1
@staticmethod
def _parse_param(param):
try:
return int(param)
except ValueError:
return param
@staticmethod
def _parse_lines(lines):
for line in lines:
opcode, x, y = line.split(' ')
x = CoProcessor._parse_param(x)
y = CoProcessor._parse_param(y)
yield opcode, x, y
def _set(self, x, y):
self._registers[x] = y if isinstance(y, int) else self._registers[y]
def _sub(self, x, y):
self._registers[x] -= y if isinstance(y, int) else self._registers[y]
def _mul(self, x, y):
self._registers[x] *= y if isinstance(y, int) else self._registers[y]
self._mul_counter += 1
    def _jnz(self, x, y):
        """
        jnz X Y jumps with an offset of the value of Y, but only if the value of X is not zero.
        (An offset of 2 skips the next instruction, an offset of -1 jumps to the previous instruction, and so on.)
        """
        # resolve X from the registers if it is a register name
        x_value = x if isinstance(x, int) else self._registers[x]
        if x_value != 0:
            # resolve Y lazily, only when the jump is actually taken
            self._pc += y if isinstance(y, int) else self._registers[y]
            return True
def run(self, fpath):
code = list(CoProcessor._parse_lines(utils.get_input_by_line(fpath)))
while True:
state = (tuple([k for k in copy.deepcopy(self._registers).values()]), copy.deepcopy(self._pc))
#print(self._registers['h'], state)
#print(self._registers['h'], end='')
if state in self._states:
raise Exception(state)
else:
self._states.add(state)
z = None
opcode, x, y = code[self._pc]
if opcode == 'set':
# set X Y sets register X to the value of Y
self._set(x, y)
elif opcode == 'sub':
self._sub(x, y)
elif opcode == 'mul':
self._mul(x, y)
elif opcode == 'jnz':
z = self._jnz(x, y)
if z is None:
self._pc += 1
if self._pc < 0 or self._pc >= len(code):
return self._mul_counter if self._debug else self._registers['h']
def part1(dpath):
cp = CoProcessor(debug=True)
return cp.run(utils.get_fpath(dpath))
def part2(dpath):
raise NotImplementedError
# takes too long, so I need to check if there is a repetition point -- none found, need to test this better...
# optimize the program -- hmmm
cp = CoProcessor(debug=False)
return cp.run(utils.get_fpath(dpath))
| python |
"""
Object Co-segmentation Datasets
author - Sayan Goswami
email - [email protected]
"""
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torchvision.datasets import ImageFolder, DatasetFolder
from torchvision.transforms import ToTensor
class DatasetABC(Dataset):
"""Abstract Base Class for Datasets"""
def __init__(self, image_dir, mask_dir):
self.image_dir = image_dir
self.mask_dir = mask_dir
self.img_size = (512, 512)
self.in_channels = 3
self.image_data = None
self.mask_data = None
self.length = None
def image_loader(self, path):
raise NotImplementedError("`image_loader` not implemented.")
    def mask_loader(self, path):
        raise NotImplementedError("`mask_loader` not implemented.")
def __len__(self):
return self.length
def __getitem__(self, index):
image, im_label = self.image_data[index]
mask, ma_label = self.mask_data[index]
label = None
if im_label == ma_label:
label = im_label
data = {
"image": image,
"mask" : mask,
"label": label
}
return data
class iCosegDataset(DatasetABC):
def __init__(self, *, image_dir, mask_dir):
super().__init__(image_dir, mask_dir)
self.image_data = DatasetFolder(root=image_dir,
loader=self.image_loader,
extensions=["jpg"],
transform=ToTensor())
self.mask_data = DatasetFolder(root=mask_dir,
loader=self.mask_loader,
extensions=["png"])
self.length = len(self.image_data)
def image_loader(self, path):
img = Image.open(path).resize(self.img_size)
img = np.array(img).astype(np.float32)/255.0
return img
def mask_loader(self, path):
img = Image.open(path).resize(self.img_size)
img = np.array(img).astype(np.uint8)
return img
class PASCALVOCCosegDataset(DatasetABC):
def __init__(self, *, image_dir, mask_dir):
super().__init__(image_dir, mask_dir)
self.image_data = DatasetFolder(root=image_dir,
loader=self.image_loader,
extensions=["jpg"],
transform=ToTensor())
self.mask_data = DatasetFolder(root=mask_dir,
loader=self.mask_loader,
extensions=["png"])
self.length = len(self.image_data)
def image_loader(self, path):
img = Image.open(path).resize(self.img_size)
img = np.array(img).astype(np.float32)/255.0
return img
def mask_loader(self, path):
img = Image.open(path).convert('L').resize(self.img_size)
img = np.array(img).astype(np.uint8)/255.0
return img
class InternetDataset(DatasetABC):
def __init__(self, *, image_dir, mask_dir):
super().__init__(image_dir, mask_dir)
self.image_data = DatasetFolder(root=image_dir,
loader=self.image_loader,
extensions=["jpg"],
transform=ToTensor())
self.mask_data = DatasetFolder(root=mask_dir,
loader=self.mask_loader,
extensions=["png"])
self.length = len(self.image_data)
def image_loader(self, path):
img = Image.open(path).resize(self.img_size)
img = np.array(img).astype(np.float32)/255.0
return img
def mask_loader(self, path):
img = Image.open(path).convert('1').resize(self.img_size)
img = np.array(img).astype(np.uint8)
return img
class MSRCDataset(DatasetABC):
def __init__(self, *, image_dir, mask_dir):
super().__init__(image_dir, mask_dir)
self.image_data = DatasetFolder(root=image_dir,
loader=self.image_loader,
extensions=["bmp"],
transform=ToTensor())
self.mask_data = DatasetFolder(root=mask_dir,
loader=self.mask_loader,
extensions=["bmp"])
self.length = len(self.image_data)
def image_loader(self, path):
img = Image.open(path).resize(self.img_size)
img = np.array(img).astype(np.float32)/255.0
return img
def mask_loader(self, path):
img = Image.open(path).convert('1').resize(self.img_size)
img = np.array(img).astype(np.uint8)
return img
if __name__ == "__main__":
# iCoseg_dataset = iCosegDataset(image_dir="/Users/Sayan/Desktop/Research/IIT B/Vision/datasets/iCoseg/images",
# mask_dir="/Users/Sayan/Desktop/Research/IIT B/Vision/datasets/iCoseg/ground_truth")
# print(f"iCoseg_dataset: # samples = {len(iCoseg_dataset)}")
PASCALVOCCoseg_dataset = PASCALVOCCosegDataset(image_dir="/Users/Sayan/Desktop/Research/IIT B/Vision/datasets/PASCAL_coseg/images",
mask_dir="/Users/Sayan/Desktop/Research/IIT B/Vision/datasets/PASCAL_coseg/GT")
print(f"PASCALVOCCoseg_dataset: # samples = {len(PASCALVOCCoseg_dataset)}")
print(PASCALVOCCoseg_dataset[0])
# Internet_dataset = InternetDataset(image_dir="/Users/Sayan/Desktop/Research/IIT B/Vision/datasets/internet_dataset_ObjectDiscovery-data/internet_processed/images",
# mask_dir="/Users/Sayan/Desktop/Research/IIT B/Vision/datasets/internet_dataset_ObjectDiscovery-data/internet_processed/GT")
# print(f"Internet_dataset: # samples = {len(Internet_dataset)}")
# MSRC_dataset = MSRCDataset(image_dir="/Users/Sayan/Desktop/Research/IIT B/Vision/datasets/internet_dataset_ObjectDiscovery-data/MSRC_processed/images",
# mask_dir="/Users/Sayan/Desktop/Research/IIT B/Vision/datasets/internet_dataset_ObjectDiscovery-data/MSRC_processed/GT")
# print(f"MSRC_dataset: # samples = {len(MSRC_dataset)}")
| python |
""" This simulation is adapted from main for Bayesian inference analysis """
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
import plotter
import network
import os
import pickle
import numpy as np
# %%markdown
#
# %%
# do not use spatial convolution (set kernels supe small)
no_spatial_conv = True
# Use a unique name for each experiments
exp_name = 'Bayesian_Inference_Mod_test'
if no_spatial_conv:
exp_name += '_Dirac_Kernels'
########################################
######## Experiment PARAMETERS #########
# set number of neurons in
n_neurons_msi = 20
# Which conditions to test:
# 0 : both sensory stimuli, no cortical feedback
# 1 : both sensory stimuli, both cortical inputs
# 2 : both sensory stimuli, no cortical visual input
# 3 : both sensory stimuli, no cortical auditory input
# 4 : all auditory input (sensory, cortical), no visual
# 5 : all visual input (sensory, cortical), no auditory
# 6 : only auditory sensory input, both cortical
conditions_to_test = [0, 1]
# defines the stimuli with location (x,y), onset, duration
s_a_mean = 10
s_v_mean = 8
s_a_var = 1.5
s_v_var = 1.0
s_a_intensity = 0.5
s_v_intensity = 0.5
s_onset_temp = 0
# define the uncertaintiy of the inputs
sigma_s_v = 2
sigma_s_a = 2
sigma_c_v = 2
sigma_c_a = 2
readout_time = 3950
# define how many times we draw from the distribution
n_draws = 3
# Create the network and initialize all internal vars
net = network.Network(exp_name, n_neurons_msi=n_neurons_msi)
# %% Create directory according to exp name
# create directory for results if it doesnt extist
exp_name_neurons = exp_name + '_neurons_' + str(n_neurons_msi) + '_sigmas_' + str(sigma_s_v) + str(sigma_s_a) + str(
sigma_c_v) + str(sigma_c_a) + '_mean_a_' + str(s_a_mean) + '_mean_v_' + str(s_v_mean) + '_var_a_' + str(s_a_var) + '_var_v_' + str(s_v_var) + '_intens_a_' + str(s_a_intensity) + '_intens_v_' + str(s_v_intensity) + '_draws_' + str(n_draws)
# exp_dir = path = os.path.join(os.getcwd(), 'Results')
exp_dir = path = os.path.join(os.getcwd(), 'Results')
# create result directory if it doesnt exists
if not os.path.exists(exp_dir):
os.mkdir(exp_dir)
exp_dir = path = os.path.join(exp_dir, exp_name_neurons)
# check if directory exists and if its not empty
if os.path.exists(exp_dir) and os.listdir(exp_dir):
print('Directory ' + exp_dir +
' already exists. Using existing data and results...')
skip_simulation = True
else:
skip_simulation = False
print('Creating directory : ', exp_dir)
os.mkdir(exp_dir)
# create a file with all parameters
with open(os.path.join(exp_dir, exp_name_neurons + '.txt',), 'w+') as f: # Python 3: open(..., 'wb')
f.write(exp_name + '\n \n')
f.write('Audio Stimulus Mean : ' + str(s_a_mean) + '\n')
f.write('Audio Stimulus Variance : ' + str(s_a_var) + '\n')
f.write('Audio Stimulus Intensity : ' + str(s_a_intensity) + '\n')
f.write('Visual Stimulus Mean : ' + str(s_v_mean) + '\n')
f.write('Visual Stimulus Variance : ' + str(s_v_var) + '\n')
f.write('Visual Stimulus Intensity : ' + str(s_v_intensity) + '\n')
f.write('\n \n')
f.write('Sensory Audio Sigma : ' + str(sigma_s_a) + '\n')
f.write('Sensory Video Sigma : ' + str(sigma_s_v) + '\n')
f.write('Cortical Audio Sigma : ' + str(sigma_c_a) + '\n')
f.write('Cortical Video Sigma : ' + str(sigma_c_v) + '\n')
f.write('\n \n')
f.write('Conditions to test : ' + str(conditions_to_test) + '\n')
f.write('\n \n')
f.close()
# %%
########################################
# initialize arrays
net_out = np.zeros((n_draws, len(conditions_to_test), net.n_neurons_msi))
sensory_input_v = np.zeros(
(n_draws, len(conditions_to_test), net.len_t, net.n_neurons_msi))
sensory_input_a = np.zeros(
(n_draws, len(conditions_to_test), net.len_t, net.n_neurons_msi))
cortical_input_v = np.zeros(
(n_draws, len(conditions_to_test), net.len_t, net.n_neurons_msi))
cortical_input_a = np.zeros(
(n_draws, len(conditions_to_test), net.len_t, net.n_neurons_msi))
r_all = np.zeros((n_draws, len(conditions_to_test),
net.len_t, net.n_neurons_msi))
p_pool_all = np.zeros((n_draws, len(conditions_to_test),
net.len_t, net.n_neurons_msi))
p_sensory_all = np.zeros(
(n_draws, len(conditions_to_test), net.len_t, net.n_neurons_msi))
q_fb_all = np.zeros((n_draws, len(conditions_to_test),
net.len_t, net.n_neurons_msi))
q_s2_v_all = np.zeros((n_draws, len(conditions_to_test),
net.len_t, net.n_neurons_msi))
q_s2_a_all = np.zeros((n_draws, len(conditions_to_test),
net.len_t, net.n_neurons_msi))
q_s1_v_all = np.zeros((n_draws, len(conditions_to_test),
net.len_t, net.n_neurons_msi))
q_s1_a_all = np.zeros((n_draws, len(conditions_to_test),
net.len_t, net.n_neurons_msi))
if not skip_simulation:
for i_draw in range(n_draws):
        # draw the stimulus locations from Gaussian distributions, rejecting
        # samples that fall outside the neuron grid
        s_a_location = -1
        s_v_location = -1
        while s_a_location < 0 or s_a_location >= n_neurons_msi:
            s_a_location = np.random.normal(loc=s_a_mean, scale=s_a_var)
        while s_v_location < 0 or s_v_location >= n_neurons_msi:
            s_v_location = np.random.normal(loc=s_v_mean, scale=s_v_var)
for i_condi, condi in enumerate(conditions_to_test):
########################
# Create Input Stimuli #
########################
# sensory inputs
stimuli_s_v = {'loc': np.array([s_v_location]), 'onset': s_onset_temp,
'duration': net.len_t - s_onset_temp, 'sigma': sigma_s_v, 'weight': s_v_intensity}
stimuli_s_a = {'loc': np.array([s_a_location]), 'onset': s_onset_temp,
'duration': net.len_t - s_onset_temp, 'sigma': sigma_s_a, 'weight': s_a_intensity}
# cortical inputs
stimuli_c_v = {'loc': np.array([s_v_location]), 'onset': s_onset_temp,
'duration': net.len_t - s_onset_temp, 'sigma': sigma_c_v, 'weight': s_v_intensity}
stimuli_c_a = {'loc': np.array([s_a_location]), 'onset': s_onset_temp,
'duration': net.len_t - s_onset_temp, 'sigma': sigma_c_a, 'weight': s_a_intensity}
# Create inputs
sens_in_v, sens_in_a, cor_in_v, cor_in_a = net.create_inputs(
stimuli_s_v, stimuli_s_a, stimuli_c_v, stimuli_c_a, gauss=True)
# run the network with random locations
r, act, p_pool, p_sensory, q_fb, q_s2_v, q_s2_a, q_s1_v, q_s1_a = net.run(
i_condi, dirac_kernels=no_spatial_conv)
# save the data
net_out[i_draw, i_condi, :] = act[readout_time, :]
r_all[i_draw, i_condi, :, :] = r
# p_pool_all[i_draw, i_condi, :, :] = p_pool
# p_sensory_all[i_draw, i_condi, :, :] = p_sensory
q_fb_all[i_draw, i_condi, :, :] = q_fb
# q_s2_v_all[i_draw, i_condi, :, :] = q_s2_v
# q_s2_a_all[i_draw, i_condi, :, :] = q_s2_a
# q_s1_v_all[i_draw, i_condi, :, :] = q_s1_v
# q_s1_a_all[i_draw, i_condi, :, :] = q_s1_a
sensory_input_v[i_draw, i_condi, :, :] = sens_in_v
sensory_input_a[i_draw, i_condi, :, :] = sens_in_a
# cortical_input_v[i_draw, i_condi, :, :] = cor_in_v
# cortical_input_a[i_draw, i_condi, :, :] = cor_in_a
print('Draw ' + str(i_draw + 1) + ' of ' + str(n_draws) + ' Condition : ' +
str(i_condi + 1) + ' of ' + str(len(conditions_to_test)))
# %%
###### Save outputs ######
results_file = os.path.join(exp_dir, 'results.pkl')
if not os.path.exists(results_file):
with open(results_file, 'wb') as f:
pickle.dump([net_out, r_all, sensory_input_v, sensory_input_a], f)
else:
with open(results_file, 'rb') as f:
net_out, r_all, sensory_input_v, sensory_input_a = pickle.load(
f)
###### Plotting ######
# %%
fusion = np.zeros(n_draws).astype('bool')
for i_draw in range(n_draws):
fusion[i_draw] = not (
len(find_peaks(np.squeeze(net_out[i_draw, 1, :]), distance=1)[0]) > 1)
# find all modes of response
modes_response_fb_on = np.argmax(net_out[fusion, 1, :], 1)
modes_response_fb_off = np.argmax(net_out[fusion, 0, :], 1)
# find all modes of inputs
modes_input_a = np.argmax(sensory_input_a[fusion, 1, readout_time, :], 1)
modes_input_v = np.argmax(sensory_input_v[fusion, 1, readout_time, :], 1)
fig = plt.figure(figsize=(10, 10))
# plot the stuff
plt.hist(modes_response_fb_on, bins=21, range=(0, 20), alpha=0.5)
plt.hist(modes_response_fb_off, bins=21, range=(
0, 20), histtype='step', linestyle=('dashed'))
plt.hist(modes_input_a, bins=21, range=(0, 20), histtype='step')
plt.hist(modes_input_v, bins=21, range=(0, 20), histtype='step')
# caluclate means and vars from response
res_mean_fb_off = np.argmax(np.histogram(
modes_response_fb_off, bins=21, range=(0, 20))[0])
res_mean_fb_on = np.argmax(np.histogram(
modes_response_fb_on, bins=21, range=(0, 20))[0])
res_var_fb_off = np.var(modes_response_fb_off)
res_var_fb_on = np.var(modes_response_fb_on)
sens_a_var = np.var(modes_input_a)
sens_v_var = np.var(modes_input_v)
# calculate means and vars from input
computed_mean = np.argmax(np.mean(
sensory_input_a[fusion, 1, readout_time, :] * sensory_input_v[fusion, 1, readout_time, :], 0))
computed_var = (sens_a_var * sens_v_var) / (sens_a_var + sens_v_var)
print('\nModel Response Mean (Cort On): {0:.2f} \nModel Response Mean (Cort Off): {1:.2f} \nComputed Mean : {2:.2f}'.format(
res_mean_fb_on, res_mean_fb_off, computed_mean))
print('\nModel Response Variance (Cort On): {0:.2f} \nModel Response Variance (Cort Off): {1:.2f} \nComputed Variance : {2:.2f}'.format(
res_var_fb_on, res_var_fb_off, computed_var))
# save stuff
results_file = os.path.join(exp_dir, 'means_vars.pkl')
with open(results_file, 'wb') as f:
pickle.dump([res_mean_fb_on, res_var_fb_on, computed_mean,
computed_var, res_mean_fb_off, res_var_fb_off], f)
# %%
q_fb_all[:, :, 3950, 8]
| python |
"""
Core functions.
"""
import numpy as np
import warnings
from scipy.optimize import brentq, fsolve
from scipy.stats import ttest_ind, ttest_1samp
from fractions import Fraction
from .utils import get_prng, potential_outcomes, permute
def corr(x, y, alternative='greater', reps=10**4, seed=None, plus1=True):
r"""
Simulate permutation p-value for Pearson correlation coefficient
Parameters
----------
x : array-like
y : array-like
alternative : {'greater', 'less', 'two-sided'}
The alternative hypothesis to test
reps : int
seed : RandomState instance or {None, int, RandomState instance}
If None, the pseudorandom number generator is the RandomState
instance used by `np.random`;
If int, seed is the seed used by the random number generator;
If RandomState instance, seed is the pseudorandom number generator
plus1 : bool
flag for whether to add 1 to the numerator and denominator of the
p-value based on the empirical permutation distribution.
Default is True.
Returns
-------
tuple
Returns test statistic, p-value, simulated distribution
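    Examples
    --------
    A minimal illustration (values depend on the seed, so no output is shown
    here):
    >>> x = np.array([1.1, 2.3, 3.0, 4.8, 5.2, 6.9])
    >>> y = np.array([0.9, 2.1, 3.3, 4.4, 5.5, 6.1])
    >>> tst, pvalue, sims = corr(x, y, reps=100, seed=42)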
"""
prng = get_prng(seed)
tst = np.corrcoef(x, y)[0, 1]
sims = [np.corrcoef(permute(x, prng), y)[0, 1] for i in range(reps)]
left_pv = (np.sum(sims <= tst)+plus1) / (reps+plus1)
right_pv = (np.sum(sims >= tst)+plus1) / (reps+plus1)
if alternative == 'greater':
pvalue = right_pv
elif alternative == 'less':
pvalue = left_pv
elif alternative == 'two-sided':
pvalue = np.min([1, 2 * np.min([left_pv, right_pv])])
return tst, pvalue, sims
def spearman_corr(x, y, alternative='greater', reps=10**4, seed=None, plus1=True):
r"""
Simulate permutation p-value for Spearman correlation coefficient
Parameters
----------
x : array-like
y : array-like
alternative : {'greater', 'less', 'two-sided'}
The alternative hypothesis to test
reps : int
seed : RandomState instance or {None, int, RandomState instance}
If None, the pseudorandom number generator is the RandomState
instance used by `np.random`;
If int, seed is the seed used by the random number generator;
If RandomState instance, seed is the pseudorandom number generator
plus1 : bool
flag for whether to add 1 to the numerator and denominator of the
p-value based on the empirical permutation distribution.
Default is True.
Returns
-------
tuple
Returns test statistic, p-value, simulated distribution
"""
xnew = np.argsort(x)+1
ynew = np.argsort(y)+1
    return corr(xnew, ynew, alternative=alternative, reps=reps, seed=seed,
                plus1=plus1)
def two_sample_core(potential_outcomes_all, nx, tst_stat, alternative='greater',
reps=10**5, keep_dist=False, seed=None, plus1=True):
r"""
Main workhorse function for two_sample and two_sample_shift
Parameters
----------
potential_outcomes_all : array-like
2D array of potential outcomes under treatment (1st column)
and control (2nd column). To be passed in from potential_outcomes
nx : int
Size of the treatment group x
reps : int
number of repetitions
tst_stat: function
The test statistic
alternative : {'greater', 'less', 'two-sided'}
The alternative hypothesis to test
keep_dist : bool
flag for whether to store and return the array of values
of the test statistic. Default is False.
seed : RandomState instance or {None, int, RandomState instance}
If None, the pseudorandom number generator is the RandomState
instance used by `np.random`;
If int, seed is the seed used by the random number generator;
If RandomState instance, seed is the pseudorandom number generator
plus1 : bool
flag for whether to add 1 to the numerator and denominator of the
p-value based on the empirical permutation distribution.
Default is True.
Returns
-------
float
the estimated p-value
float
the test statistic
list
The distribution of test statistics.
These values are only returned if `keep_dist` == True
"""
prng = get_prng(seed)
rr = list(range(potential_outcomes_all.shape[0]))
tst = tst_stat(potential_outcomes_all[:nx, 0],
potential_outcomes_all[nx:, 1])
thePvalue = {
'greater': lambda pUp, pDn: pUp+plus1/(reps+plus1),
'less': lambda pUp, pDn: pDn+plus1/(reps+plus1),
'two-sided': lambda pUp, pDn: 2 * np.min([0.5, \
pUp+plus1/(reps+plus1), \
pDn+plus1/(reps+plus1)])
}
if keep_dist:
dist = np.empty(reps)
for i in range(reps):
prng.shuffle(rr)
pp = np.take(potential_outcomes_all, rr, axis=0)
dist[i] = tst_stat(pp[:nx, 0], pp[nx:, 1])
pUp = np.sum(dist >= tst)/(reps+plus1)
pDn = np.sum(dist <= tst)/(reps+plus1)
return thePvalue[alternative](pUp, pDn), dist
else:
hitsUp = 0
hitsDn = 0
for i in range(reps):
prng.shuffle(rr)
pp = np.take(potential_outcomes_all, rr, axis=0)
hitsUp += tst_stat(pp[:nx, 0], pp[nx:, 1]) >= tst
hitsDn += tst_stat(pp[:nx, 0], pp[nx:, 1]) <= tst
pUp = hitsUp/(reps+plus1)
pDn = hitsDn/(reps+plus1)
return thePvalue[alternative](pUp, pDn)
def two_sample(x, y, reps=10**5, stat='mean', alternative="greater",
keep_dist=False, seed=None, plus1=True):
r"""
One-sided or two-sided, two-sample permutation test for equality of
two means, with p-value estimated by simulated random sampling with
reps replications.
Tests the hypothesis that x and y are a random partition of x,y
against the alternative that x comes from a population with mean
(a) greater than that of the population from which y comes,
if side = 'greater'
(b) less than that of the population from which y comes,
if side = 'less'
(c) different from that of the population from which y comes,
if side = 'two-sided'
    If ``keep_dist``, return the distribution of values of the test statistic;
    otherwise, return only the test statistic and the p-value.
Parameters
----------
x : array-like
Sample 1
y : array-like
Sample 2
reps : int
number of repetitions
stat : {'mean', 't'}
The test statistic.
(a) If stat == 'mean', the test statistic is (mean(x) - mean(y))
(equivalently, sum(x), since those are monotonically related)
(b) If stat == 't', the test statistic is the two-sample t-statistic--
but the p-value is still estimated by the randomization,
approximating the permutation distribution.
The t-statistic is computed using scipy.stats.ttest_ind
(c) If stat is a function (a callable object), the test statistic is
that function. The function should take two arguments:
given a permutation of the pooled data, the first argument is the
"new" x and the second argument is the "new" y.
For instance, if the test statistic is the Kolmogorov-Smirnov distance
between the empirical distributions of the two samples,
$\max_t |F_x(t) - F_y(t)|$, the test statistic could be written:
f = lambda u, v: np.max( \
[abs(sum(u<=val)/len(u)-sum(v<=val)/len(v)) for val in np.concatenate([u, v])]\
)
alternative : {'greater', 'less', 'two-sided'}
The alternative hypothesis to test
keep_dist : bool
flag for whether to store and return the array of values
        of the test statistic
seed : RandomState instance or {None, int, RandomState instance}
If None, the pseudorandom number generator is the RandomState
instance used by `np.random`;
If int, seed is the seed used by the random number generator;
If RandomState instance, seed is the pseudorandom number generator
plus1 : bool
flag for whether to add 1 to the numerator and denominator of the
p-value based on the empirical permutation distribution.
Default is True.
Returns
-------
float
the estimated p-value
float
the test statistic
list
The distribution of test statistics.
These values are only returned if `keep_dist` == True
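    Examples
    --------
    Illustrative call only (the p-value is random, so no output is shown):
    >>> x = np.array([4.2, 5.1, 6.3, 7.0, 5.8])
    >>> y = np.array([3.1, 3.9, 4.4, 2.8, 3.5])
    >>> p, tst = two_sample(x, y, reps=100, seed=12345)
    >>> p_kd, tst_kd, dist = two_sample(x, y, reps=100, seed=12345,
    ...                                 keep_dist=True)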
"""
# Set up potential outcomes; under the null, all units are exchangeable
pot_out_all = np.column_stack(
[np.concatenate([x, y]), np.concatenate([x, y])])
# If stat is callable, use it as the test function. Otherwise, look in the
# dictionary
stats = {
'mean': lambda u, v: np.mean(u) - np.mean(v),
't': lambda u, v: ttest_ind(u, v, equal_var=True)[0]
}
if callable(stat):
tst_fun = stat
else:
tst_fun = stats[stat]
nx = len(x)
observed_tst = tst_fun(pot_out_all[:nx, 0], pot_out_all[nx:, 1])
res = two_sample_core(pot_out_all, nx, tst_fun, alternative=alternative,
reps=reps, keep_dist=keep_dist, seed=seed, plus1=plus1)
if keep_dist:
return res[0], observed_tst, res[1]
else:
return res, observed_tst
def two_sample_shift(x, y, reps=10**5, stat='mean', alternative="greater",
keep_dist=False, seed=None, shift=None, plus1=True):
r"""
One-sided or two-sided, two-sample permutation test for equality of
two means, with p-value estimated by simulated random sampling with
reps replications.
Tests the hypothesis that x and y are a random partition of x,y
against the alternative that x comes from a population with mean
(a) greater than that of the population from which y comes,
if side = 'greater'
(b) less than that of the population from which y comes,
if side = 'less'
(c) different from that of the population from which y comes,
if side = 'two-sided'
    If ``keep_dist``, return the distribution of values of the test statistic;
    otherwise, return only the test statistic and the p-value.
Parameters
----------
x : array-like
Sample 1
y : array-like
Sample 2
reps : int
number of repetitions
stat : {'mean', 't'}
The test statistic.
(a) If stat == 'mean', the test statistic is (mean(x) - mean(y))
(equivalently, sum(x), since those are monotonically related)
(b) If stat == 't', the test statistic is the two-sample t-statistic--
but the p-value is still estimated by the randomization,
approximating the permutation distribution.
The t-statistic is computed using scipy.stats.ttest_ind
(c) If stat is a function (a callable object), the test statistic is
that function. The function should take two arguments:
given a permutation of the pooled data, the first argument is the
"new" x and the second argument is the "new" y.
For instance, if the test statistic is the Kolmogorov-Smirnov distance
between the empirical distributions of the two samples,
$\max_t |F_x(t) - F_y(t)|$, the test statistic could be written:
f = lambda u, v: np.max( \
[abs(sum(u<=val)/len(u)-sum(v<=val)/len(v)) for val in np.concatenate([u, v])]\
)
alternative : {'greater', 'less', 'two-sided'}
The alternative hypothesis to test
keep_dist : bool
flag for whether to store and return the array of values
        of the test statistic
seed : RandomState instance or {None, int, RandomState instance}
If None, the pseudorandom number generator is the RandomState
instance used by `np.random`;
If int, seed is the seed used by the random number generator;
If RandomState instance, seed is the pseudorandom number generator
shift : float
The relationship between x and y under the null hypothesis.
(a) A constant scalar shift in the distribution of y. That is, x is equal
in distribution to y + shift.
(b) A tuple containing the function and its inverse $(f, f^{-1})$, so
$x_i = f(y_i)$ and $y_i = f^{-1}(x_i)$
plus1 : bool
flag for whether to add 1 to the numerator and denominator of the
p-value based on the empirical permutation distribution.
Default is True.
Returns
-------
float
the estimated p-value
float
the test statistic
list
The distribution of test statistics.
These values are only returned if `keep_dist` == True
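    Examples
    --------
    Illustrative call only: test the null that x is distributed as y shifted
    by a constant of 2 (no output shown because the p-value is random):
    >>> x = np.array([6.1, 7.2, 8.3, 6.9, 7.7])
    >>> y = np.array([4.0, 5.3, 6.2, 4.8, 5.6])
    >>> p, tst = two_sample_shift(x, y, reps=100, seed=3, shift=2.0)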
"""
# Set up potential outcomes according to shift
if isinstance(shift, float) or isinstance(shift, int):
# Potential outcomes for all units under treatment
pot_outx = np.concatenate([x, y + shift])
# Potential outcomes for all units under control
pot_outy = np.concatenate([x - shift, y])
pot_out_all = np.column_stack([pot_outx, pot_outy])
elif isinstance(shift, tuple):
assert (callable(shift[0])), "Supply f and finverse in shift tuple"
assert (callable(shift[1])), "Supply f and finverse in shift tuple"
pot_out_all = potential_outcomes(x, y, shift[0], shift[1])
else:
raise ValueError("Bad input for shift")
# If stat is callable, use it as the test function. Otherwise, look in the
# dictionary
stats = {
'mean': lambda u, v: np.mean(u) - np.mean(v),
't': lambda u, v: ttest_ind(u, v, equal_var=True)[0]
}
if callable(stat):
tst_fun = stat
else:
tst_fun = stats[stat]
nx = len(x)
observed_tst = tst_fun(pot_out_all[:nx, 0], pot_out_all[nx:, 1])
res = two_sample_core(pot_out_all, nx, tst_fun, alternative=alternative,
reps=reps, keep_dist=keep_dist, seed=seed, plus1=plus1)
if keep_dist:
return res[0], observed_tst, res[1]
else:
return res, observed_tst
def two_sample_conf_int(x, y, cl=0.95, alternative="two-sided", seed=None,
reps=10**4, stat="mean", shift=None, plus1=True):
r"""
One-sided or two-sided confidence interval for the parameter determining
the treatment effect. The default is the "shift model", where we are
interested in the parameter d such that x is equal in distribution to
y + d. In general, if we have some family of invertible functions parameterized
by d, we'd like to find d such that x is equal in distribution to f(y, d).
Parameters
----------
x : array-like
Sample 1
y : array-like
Sample 2
cl : float in (0, 1)
The desired confidence level. Default 0.95.
alternative : {"two-sided", "lower", "upper"}
Indicates the alternative hypothesis.
seed : RandomState instance or {None, int, RandomState instance}
If None, the pseudorandom number generator is the RandomState
instance used by `np.random`;
If int, seed is the seed used by the random number generator;
If RandomState instance, seed is the pseudorandom number generator
reps : int
number of repetitions in two_sample
stat : {'mean', 't'}
The test statistic.
(a) If stat == 'mean', the test statistic is (mean(x) - mean(y))
(equivalently, sum(x), since those are monotonically related)
(b) If stat == 't', the test statistic is the two-sample t-statistic--
but the p-value is still estimated by the randomization,
approximating the permutation distribution.
The t-statistic is computed using scipy.stats.ttest_ind
(c) If stat is a function (a callable object), the test statistic is
that function. The function should take two arguments:
given a permutation of the pooled data, the first argument is the
"new" x and the second argument is the "new" y.
For instance, if the test statistic is the Kolmogorov-Smirnov distance
between the empirical distributions of the two samples,
$\max_t |F_x(t) - F_y(t)|$, the test statistic could be written:
f = lambda u, v: np.max( \
[abs(sum(u<=val)/len(u)-sum(v<=val)/len(v)) for val in np.concatenate([u, v])]\
)
shift : float
The relationship between x and y under the null hypothesis.
(a) If None, the relationship is assumed to be additive (e.g. x = y+d)
(b) A tuple containing the function and its inverse $(f, f^{-1})$, so
$x_i = f(y_i, d)$ and $y_i = f^{-1}(x_i, d)$
plus1 : bool
flag for whether to add 1 to the numerator and denominator of the
p-value based on the empirical permutation distribution.
Default is True.
Returns
-------
tuple
the estimated confidence limits
Notes
-----
xtol : float
Tolerance in brentq
rtol : float
Tolerance in brentq
maxiter : int
Maximum number of iterations in brentq
"""
# print warning
warnings.warn('This function is under construction and outputs may be unreliable.')
assert alternative in ("two-sided", "lower", "upper")
if shift is None:
shift_limit = max(abs(max(x) - min(y)), abs(max(y) - min(x)))
# FIXME: unused observed
# observed = np.mean(x) - np.mean(y)
elif isinstance(shift, tuple):
assert (callable(shift[0])), "Supply f and finverse in shift tuple"
assert (callable(shift[1])), "Supply f and finverse in shift tuple"
f = shift[0]
finverse = shift[1]
# Check that f is increasing in d; this is very ad hoc!
assert (f(5, 1) < f(5, 2)), "f must be increasing in the parameter d"
shift_limit = max(abs(fsolve(lambda d: f(max(y), d) - min(x), 0)),
abs(fsolve(lambda d: f(min(y), d) - max(x), 0)))
# FIXME: unused observed
# observed = fsolve(lambda d: np.mean(x) - np.mean(f(y, d)), 0)
else:
raise ValueError("Bad input for shift")
ci_low = -shift_limit
ci_upp = shift_limit
if alternative == 'two-sided':
cl = 1 - (1 - cl) / 2
if alternative != "upper":
if shift is None:
g = lambda q: cl - two_sample_shift(x, y, alternative="less", seed=seed,
shift=q, reps=reps, stat=stat, plus1=plus1)[0]
else:
g = lambda q: cl - two_sample_shift(x, y, alternative="less", seed=seed,
shift=(lambda u: f(u, q), lambda u: finverse(u, q)),
reps=reps, stat=stat, plus1=plus1)[0]
ci_low = brentq(g, -2 * shift_limit, 2 * shift_limit)
if alternative != "lower":
if shift is None:
g = lambda q: cl - two_sample_shift(x, y, alternative="greater", seed=seed,
shift=q, reps=reps, stat=stat, plus1=plus1)[0]
else:
g = lambda q: cl - two_sample_shift(x, y, alternative="greater", seed=seed,
shift=(lambda u: f(u, q), lambda u: finverse(u, q)),
reps=reps, stat=stat, plus1=plus1)[0]
ci_upp = brentq(g, -2 * shift_limit, 2 * shift_limit)
return ci_low, ci_upp
def one_sample(x, y=None, reps=10**5, stat='mean', alternative="greater",
keep_dist=False, seed=None, plus1=True):
r"""
One-sided or two-sided, one-sample permutation test for the mean,
with p-value estimated by simulated random sampling with
reps replications.
Alternatively, a permutation test for equality of means of two paired
samples.
    Tests the hypothesis that x is symmetrically distributed about 0
(or x and y have the same center) against the alternative that x comes from
a population with mean
(a) greater than 0 (greater than that of the population from which y comes),
if side = 'greater'
(b) less than 0 (less than that of the population from which y comes),
if side = 'less'
(c) different from 0 (different from that of the population from which y comes),
if side = 'two-sided'
    If ``keep_dist``, return the distribution of values of the test statistic;
    otherwise, return only the test statistic and the p-value.
Parameters
----------
x : array-like
Sample 1
y : array-like
Sample 2. Must preserve the order of pairs with x.
If None, x is taken to be the one sample.
reps : int
number of repetitions
stat : {'mean', 't'}
The test statistic. The statistic is computed based on either z = x or
z = x - y, if y is specified.
(a) If stat == 'mean', the test statistic is mean(z).
(b) If stat == 't', the test statistic is the t-statistic--
but the p-value is still estimated by the randomization,
approximating the permutation distribution.
(c) If stat is a function (a callable object), the test statistic is
that function. The function should take a permutation of the
data and compute the test function from it. For instance, if the
test statistic is the maximum absolute value, $\max_i |z_i|$,
the test statistic could be written:
f = lambda u: np.max(abs(u))
alternative : {'greater', 'less', 'two-sided'}
The alternative hypothesis to test
keep_dist : bool
flag for whether to store and return the array of values
        of the test statistic
seed : RandomState instance or {None, int, RandomState instance}
If None, the pseudorandom number generator is the RandomState
instance used by `np.random`;
If int, seed is the seed used by the random number generator;
If RandomState instance, seed is the pseudorandom number generator
plus1 : bool
flag for whether to add 1 to the numerator and denominator of the
p-value based on the empirical permutation distribution.
Default is True.
Returns
-------
float
the estimated p-value
float
the test statistic
list
The distribution of test statistics.
These values are only returned if `keep_dist` == True
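    Examples
    --------
    Illustrative call only (no output shown because the p-value is random):
    >>> x = np.array([0.5, -0.2, 1.3, 0.8, 0.1, 0.9])
    >>> p, tst = one_sample(x, reps=100, seed=7)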
"""
prng = get_prng(seed)
if y is None:
z = x
elif len(x) != len(y):
raise ValueError('x and y must be pairs')
else:
z = np.array(x) - np.array(y)
thePvalue = {
'greater': lambda pUp, pDn: pUp+plus1/(reps+plus1),
'less': lambda pUp, pDn: pDn+plus1/(reps+plus1),
'two-sided': lambda pUp, pDn: 2 * np.min([0.5, \
pUp+plus1/(reps+plus1), \
pDn+plus1/(reps+plus1)])
}
stats = {
'mean': lambda u: np.mean(u),
't': lambda u: ttest_1samp(u, 0)[0]
}
if callable(stat):
tst_fun = stat
else:
tst_fun = stats[stat]
tst = tst_fun(z)
n = len(z)
if keep_dist:
dist = []
for i in range(reps):
dist.append(tst_fun(z * (1 - 2 * prng.randint(0, 2, n))))
pUp = np.sum(dist >= tst)/(reps + plus1)
pDn = np.sum(dist <= tst)/(reps + plus1)
return thePvalue[alternative](pUp, pDn), tst, dist
else:
hitsUp = 0
hitsDn = 0
for i in range(reps):
tv = tst_fun(z * (1 - 2 * prng.randint(0, 2, n)))
hitsUp += (tv >= tst)
hitsDn += (tv <= tst)
pUp = hitsUp/(reps+plus1)
pDn = hitsDn/(reps+plus1)
return thePvalue[alternative](pUp, pDn), tst
| python |
"""ETS Prediction View"""
__docformat__ = "numpy"
import datetime
import os
import warnings
from typing import Union
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.common.prediction_techniques import ets_model
from gamestonk_terminal.common.prediction_techniques.pred_helper import (
price_prediction_backtesting_color,
print_prediction_kpis,
print_pretty_prediction,
)
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.helper_funcs import (
export_data,
get_next_stock_market_days,
patch_pandas_text_adjustment,
plot_autoscale,
)
from gamestonk_terminal.rich_config import console
register_matplotlib_converters()
warnings.filterwarnings("ignore")
# pylint:disable=too-many-arguments
def display_exponential_smoothing(
ticker: str,
values: Union[pd.DataFrame, pd.Series],
n_predict: int,
trend: str = "N",
seasonal: str = "N",
seasonal_periods: int = 5,
s_end_date: str = "",
export: str = "",
time_res: str = "",
):
"""Perform exponential smoothing
Parameters
----------
ticker : str
Dataset being smoothed
values : Union[pd.DataFrame, pd.Series]
Raw data
n_predict : int
Days to predict
trend : str, optional
Trend variable, by default "N"
seasonal : str, optional
Seasonal variable, by default "N"
seasonal_periods : int, optional
Number of seasonal periods, by default 5
s_end_date : str, optional
End date for backtesting, by default ""
export : str, optional
Format to export data, by default ""
time_res : str
Resolution for data, allowing for predicting outside of standard market days
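    Examples
    --------
    Illustrative sketch only; any pandas Series of prices indexed by date
    should work. The call opens matplotlib figures, so it is skipped under
    doctest.
    >>> idx = pd.date_range("2021-01-01", periods=120, freq="B")
    >>> prices = pd.Series(np.linspace(100.0, 130.0, 120), index=idx)
    >>> display_exponential_smoothing("DEMO", prices, n_predict=5)  # doctest: +SKIP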
"""
if s_end_date:
if not time_res:
future_index = get_next_stock_market_days(
last_stock_day=s_end_date, n_next_days=n_predict
)
else:
future_index = pd.date_range(
s_end_date, periods=n_predict + 1, freq=time_res
)[1:]
if future_index[-1] > datetime.datetime.now():
console.print(
"Backtesting not allowed, since End Date + Prediction days is in the future\n"
)
return
df_future = values[future_index[0] : future_index[-1]]
values = values[:s_end_date] # type: ignore
# Get ETS model
model, title, forecast = ets_model.get_exponential_smoothing_model(
values, trend, seasonal, seasonal_periods, n_predict
)
if not forecast:
console.print("No forecast made. Model did not converge.\n")
return
if np.isnan(forecast).any():
console.print("Model predicted NaN values. Runtime Error.\n")
return
if not time_res:
l_pred_days = get_next_stock_market_days(
last_stock_day=values.index[-1],
n_next_days=n_predict,
)
else:
l_pred_days = pd.date_range(
values.index[-1], periods=n_predict + 1, freq=time_res
)[1:]
df_pred = pd.Series(forecast, index=l_pred_days, name="Price")
console.print(f"\n{title}")
console.print("\nFit model parameters:")
for key, value in model.params.items():
console.print(f"{key} {' '*(18-len(key))}: {value}")
console.print("\nAssess fit model:")
console.print(f"AIC: {round(model.aic, 2)}")
console.print(f"BIC: {round(model.bic, 2)}")
console.print(f"SSE: {round(model.sse, 2)}\n")
# Plotting
fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
ax.plot(values.index, values.values, lw=2)
# BACKTESTING
if s_end_date:
ax.set_title(f"BACKTESTING: {title} on {ticker}")
else:
ax.set_title(f"{title} on {ticker}")
ax.set_xlim(
values.index[0],
get_next_stock_market_days(df_pred.index[-1], 1)[-1],
)
ax.set_xlabel("Time")
ax.set_ylabel("Share Price ($)")
ax.grid(b=True, which="major", color="#666666", linestyle="-")
ax.minorticks_on()
ax.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
ax.plot(
[values.index[-1], df_pred.index[0]],
[values.values[-1], df_pred.values[0]],
lw=1,
c="tab:green",
linestyle="--",
)
ax.plot(df_pred.index, df_pred, lw=2, c="tab:green")
ax.axvspan(
values.index[-1],
df_pred.index[-1],
facecolor="tab:orange",
alpha=0.2,
)
_, _, ymin, ymax = plt.axis()
ax.vlines(
values.index[-1],
ymin,
ymax,
linewidth=1,
linestyle="--",
color="k",
)
dateFmt = mdates.DateFormatter("%m/%d/%Y")
ax.xaxis.set_major_formatter(dateFmt)
ax.tick_params(axis="x", labelrotation=45)
# BACKTESTING
if s_end_date:
ax.plot(
df_future.index,
df_future,
lw=2,
c="tab:blue",
ls="--",
)
ax.plot(
[values.index[-1], df_future.index[0]],
[
values.values[-1],
df_future.values[0],
],
lw=1,
c="tab:blue",
linestyle="--",
)
if gtff.USE_ION:
plt.ion()
fig.tight_layout()
plt.show()
# BACKTESTING
if s_end_date:
dateFmt = mdates.DateFormatter("%m-%d")
fig, ax = plt.subplots(1, 2, figsize=plot_autoscale(), dpi=PLOT_DPI)
ax0 = ax[0]
ax0.plot(
df_future.index,
df_future,
lw=2,
c="tab:blue",
ls="--",
)
ax0.plot(df_pred.index, df_pred, lw=2, c="green")
ax0.scatter(
df_future.index,
df_future,
c="tab:blue",
lw=3,
)
ax0.plot(
[values.index[-1], df_future.index[0]],
[
values.values[-1],
df_future.values[0],
],
lw=2,
c="tab:blue",
ls="--",
)
ax0.scatter(df_pred.index, df_pred, c="green", lw=3)
ax0.plot(
[values.index[-1], df_pred.index[0]],
[values.values[-1], df_pred.values[0]],
lw=2,
c="green",
ls="--",
)
ax0.set_title("BACKTESTING: Prices")
ax0.set_xlim(
values.index[-1],
df_pred.index[-1] + datetime.timedelta(days=1),
)
ax0.set_ylabel("Share Price ($)")
ax0.grid(b=True, which="major", color="#666666", linestyle="-")
ax0.legend(["Real data", "Prediction data"])
ax1 = ax[1]
ax1.axhline(y=0, color="k", linestyle="--", linewidth=2)
ax1.plot(
df_future.index,
100 * (df_pred.values - df_future.values) / df_future.values,
lw=2,
c="red",
)
ax1.scatter(
df_future.index,
100 * (df_pred.values - df_future.values) / df_future.values,
c="red",
lw=5,
)
ax1.set_title("BACKTESTING: % Error")
ax1.plot(
[values.index[-1], df_future.index[0]],
[
0,
100 * (df_pred.values[0] - df_future.values[0]) / df_future.values[0],
],
lw=2,
ls="--",
c="red",
)
ax1.set_xlim(
values.index[-1],
df_pred.index[-1] + datetime.timedelta(days=1),
)
ax1.set_xlabel("Time")
ax1.set_ylabel("Prediction Error (%)")
ax1.grid(b=True, which="major", color="#666666", linestyle="-")
ax1.legend(["Real data", "Prediction data"])
ax0.xaxis.set_major_formatter(dateFmt)
ax0.tick_params(axis="x", labelrotation=45)
ax1.xaxis.set_major_formatter(dateFmt)
ax1.tick_params(axis="x", labelrotation=45)
if gtff.USE_ION:
plt.ion()
fig.tight_layout()
plt.show()
# Refactor prediction dataframe for backtesting print
df_pred.name = "Prediction"
df_pred = df_pred.to_frame()
df_pred["Real"] = df_future
if gtff.USE_COLOR:
patch_pandas_text_adjustment()
console.print("Time Real [$] x Prediction [$]")
console.print(
df_pred.apply(price_prediction_backtesting_color, axis=1).to_string()
)
else:
console.print(df_pred[["Real", "Prediction"]].round(2).to_string())
console.print("")
print_prediction_kpis(df_pred["Real"].values, df_pred["Prediction"].values)
else:
# Print prediction data
print_pretty_prediction(df_pred, values.values[-1])
export_data(export, os.path.dirname(os.path.abspath(__file__)), "ets")
console.print("")
| python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of pyunicorn.
# Copyright (C) 2008--2019 Jonathan F. Donges and pyunicorn authors
# URL: <http://www.pik-potsdam.de/members/donges/software>
# License: BSD (3-clause)
#
# Please acknowledge and cite the use of this software and its authors
# when results are used in publications or published elsewhere.
#
# You can use the following reference:
# J.F. Donges, J. Heitzig, B. Beronov, M. Wiedermann, J. Runge, Q.-Y. Feng,
# L. Tupikina, V. Stolbova, R.V. Donner, N. Marwan, H.A. Dijkstra,
# and J. Kurths, "Unified functional network and nonlinear time series analysis
# for complex systems science: The pyunicorn package"
"""
climate
=======
Provides classes for generating and analyzing complex climate networks.
Related Publications
~~~~~~~~~~~~~~~~~~~~
[Donges2009c]_, [Donges2009a]_, [Donges2009b]_, [Donges2011a]_, [Zou2011]_,
[Tominski2011]_, [Heitzig2012]_
To do
~~~~~
- A lot - See current product backlog.
Known Bugs
~~~~~~~~~~
- ...
"""
from ..core import GeoNetwork, Grid, Network
from .climate_data import ClimateData
from .climate_network import ClimateNetwork
from .coupled_climate_network import CoupledClimateNetwork
from .coupled_tsonis import CoupledTsonisClimateNetwork
from .havlin import HavlinClimateNetwork
from .hilbert import HilbertClimateNetwork
from .map_plots import MapPlots
from .mutual_info import MutualInfoClimateNetwork
from .partial_correlation import PartialCorrelationClimateNetwork
from .rainfall import RainfallClimateNetwork
from .spearman import SpearmanClimateNetwork
from .tsonis import TsonisClimateNetwork
from .eventsynchronization_climatenetwork import \
EventSynchronizationClimateNetwork
#
# Set global constants
#
# Mean earth radius in kilometers
from ..core import EARTH_RADIUS
| python |
class AppriseNotificationFailure(Exception):
# Apprise returns false if something goes wrong
# they do not have Exception objects, so we're creating a catch all here
pass
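# A minimal usage sketch (hypothetical caller; the apprise calls below are
# assumptions and not part of this module):
#
#     import apprise
#     notifier = apprise.Apprise()
#     notifier.add("mailto://user:password@example.com")
#     if not notifier.notify(title="Backup", body="Nightly backup finished"):
#         raise AppriseNotificationFailure("Apprise returned False for the notification")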
| python |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2019 Lorenzo
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from .abc import UnNamedBaseObject
from .common import NamedAPIObject
__all__ = (
"Machine",
)
class Machine(UnNamedBaseObject):
"""Represents a machine object from the API.
.. versionadded:: 0.1.5a
.. container:: operations
.. describe:: str(x)
Returns the machine item's name.
.. describe:: x[y]
        Returns the machine's y attribute.
.. describe:: x == y
        Check if two machines are the same.
.. describe:: x != y
        Check if two machines are *not* the same.
Attributes
----------
id: :class:`int`
The identifier for this machine.
item: :class:`NamedAPIObject`
The TM or HM item that corresponds to the machine.
move: :class:`NamedAPIObject`
The move that is taught by the machine.
version_group: :class:`NamedAPIObject`
The version group that the machine applies to.
"""
def __init__(self, data: dict):
super().__init__(data)
self.item = NamedAPIObject(data["item"])
self.move = NamedAPIObject(data["move"])
self.version_group = NamedAPIObject(data["version_group"])
def __str__(self) -> str:
return str(self.item)
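# A minimal construction sketch (hypothetical payload shaped like a PokeAPI
# /machine/{id} response; the field values are made up for illustration):
#
#     payload = {
#         "id": 1,
#         "item": {"name": "tm01", "url": "https://pokeapi.co/api/v2/item/305/"},
#         "move": {"name": "mega-punch", "url": "https://pokeapi.co/api/v2/move/5/"},
#         "version_group": {"name": "red-blue", "url": "https://pokeapi.co/api/v2/version-group/1/"},
#     }
#     machine = Machine(payload)
#     str(machine)  # expected to give the item's name, e.g. "tm01"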
| python |
# 文字列
import keyword
from importlib import import_module
import re
import string
'''
[文字列モジュール|string[|モジュール]]をインポートする
@prefix(sub;部分文字列)
@alt(先頭|最初|左[側|端])
@alt(末尾|最後|後ろ|右[側|端])
@alt(左側|左)
@alt(右側|右)
'''
string = import_module('string')
keyword = import_module('keyword')
s = 'ABC abc 123' # 文字列 s, s2, s3
s2 = 'a'
s3 = '123'
ch = 'A'
n = 1 # 整数 n, n1, n2
n2 = 3
filename = 'file.txt' # ファイル name
aStringList = ['A', 'B', 'C'] # 文字列(リスト|タプル)
''
'''
空文字[|を得る]
an empty string
'''
'\n'
'''
改行[|文字][|を得る]
'''
'\t'
'''
タブ[|文字][|を得る]
'''
' '
'''
空白[|文字][|を得る]
space
'''
string.ascii_letters
'''
@alt(全ての|全)
@alt(全部|全て|)
@alt(アルファベット|英字)
アルファベットの文字列
アルファベットを全部得る
'''
string.ascii_lowercase
'''
[アルファベットの|]小文字列
[アルファベットの|]小文字を全部得る
'''
string.ascii_uppercase
'''
[アルファベットの|]大文字列
[アルファベットの|]大文字を全部得る
'''
string.digits
'''
全ての数字
数字を全部得る
'''
string.hexdigits
'''
全ての十六進数字
十六進数字を全部得る
'''
string.octdigits
'''
全ての[8|八]進数字
[8|八]進数字を全部得る
'''
string.punctuation
'''
@alt(句読点|句点)
全ての句読点文字
句読点文字を全部得る
'''
string.printable
'''
@alt(印字可能な文字|印字できる文字|印字)
全ての印字可能な文字
印字可能な文字を全部得る
'''
string.whitespace
'''
全ての空白文字
空白文字を全部得る
'''
unicode = ord('A')
chr(unicode)
'''
@prefix(unicode;[文字コード|ユニコード])
unicodeを文字に変換する
unicodeに対応する文字[|を得る]
'''
ord(ch)
'''
@prefix(ch;文字)
@alt(ユニコード=[文字コード|ユニコード|ASCIIコード])
chをユニコードに変換する
chのユニコード[|を得る]
'''
[ord(ch) for ch in s]
'''
sをユニコード列に変換する
'''
s.upper()
'''
sを[全て|]大文字に変換する
[s中の|]小文字を大文字に変換する
'''
s.lower()
'''
sを[全て|]小文字に変換する
[s中の|]大文字を小文字に変換する
'''
s.casefold()
'''
@alt(ケース|大文字小文字)
sのケースを[変換する|整える]
sを[全て|]小文字に変換する
sを[積極的に|特殊文字も含め]小文字に変換する
'''
list(s)
'''
@alt(文字リスト|文字のリスト)
sを文字リストに変換する
s中の文字を列挙する
'''
s.split()
'''
@alt(区切る|分割する)
@alt(で_|によって|を[用い|使っ]て)
{sを|空白で}区切って、[文字列リストに変換する|列挙する]
{sを|空白で_}区切る
'''
map(int, s.split())
'''
@alt(整数リスト|整数のリスト|[|整]数列)
{sを|空白で_}区切って、整数リストに変換する
'''
sub = ';'
s.split(sub)
'''
@alt(セパレータ|区切り[|記号])
{sを|[sub|セパレータ]で}区切って、[文字列リストに変換する|列挙する]
{sを|[sub|セパレータ]で_}区切る
'''
__X__ = ','
s.split(__X__)
'''
@X(',';':';sep)
@Y(カンマ;コロン;[セパレータ|区切り])
{sを|__Y__で}[分割して|区切って]、文字列リストに変換する
{sを|__Y__で_}区切って、列挙する
'''
s.splitlines()
'''
{sを|改行で}[分割し|区切り]、文字列リストに変換する
{sを|改行で_}区切る
'''
s.rsplit()
'''
{sを|[末尾|最後|右]から|空白で}区切る
'''
s.rsplit(sub)
'''
{sを|[末尾|最後|右]から|subで}区切る
'''
sep = ' '
s.partition(sep)
'''
@alt(二分する|[二|]分割する|二つに分ける)
sを二分する
'''
s.partition(sub)
'''
{sを|[|最初の]subで_}二分する
'''
s.rpartition(sub)
'''
{sを|最後のsubで_}二分する
'''
s.partition(sub)[0]
'''
@alt(とき|時|場合)
@alt(二分し|二つに区切って)
@alt(分けた|分割した)
{sを|[|最初の]subで}二分し、[前半の|最初の|先頭の]文字列を得る
{sを|[|最初の]subで}分けたときの[前半の|最初の|先頭の]文字列[|を得る|を取り出す]
'''
s.partition(sub)[-1]
'''
{sを[|最初の]subで}二分し、[後半の|残りの]文字列を得る
{sを|[|最初の]subで}分けたときの[後半の|残りの]文字列[|を得る|を取り出す]
'''
s.rpartition(sub)[0]
'''
{sを|最後のsubで}二分し、[前半の|最初の|先頭の]文字列を得る
{sを|最後のsubで}分けたときの[前半の|最初の|先頭の]文字列[|を得る|を取り出す]
'''
s.rpartition(sub)[-1]
'''
{sを最後のsubで}二分し、[後半の|残りの]文字列を得る
{sを最後のsubで}分けたときの[後半の|残りの]文字列[|を得る|を取り出す]
'''
sub = 'A'
newsub = 'a'
s.replace(sub, newsub)
'''
@alt(置き換える|置換する)
@prefix(newsub;[新しい|別の]文字列)
文字列を置き換える
{文字列を|newsubで_}[|全部]置き換える
{s中のsubを|newsubに}[|全部]置き換える
{s中のsubを|s3に}[|全部]置き換える
'''
s.replace(sub, newsub, n)
'''
{文字列を|[回数を制限して|n回だけ]}置き換える
{s中のsubを|newsubに|回数を制限して}置き換える
'''
s.replace(sub, '')
'''
@alt(取り除く|除く|除去する|消す)
sからsubを[全て|]取り除く
'''
s.expandtabs(tabsize=n)
'''
s中のタブ文字を[|n個の]空白に[置き換える|する]
'''
s.strip()
'''
@alt(不要な=[|不要な|余分な])
sの両端から不要な[空白|空白と改行]を取り除く
sをトリムする
'''
s.strip(sub)
'''
sの両端からsubを取り除く
'''
s.lstrip()
'''
sの先頭[から|の]不要な[空白|空白やタブ]を取り除く
sを左トリムする
'''
__X__ = sub
s.lstrip(__X__)
'''
@X(sub;'\t';' ')
@Y(sub;タブ;空白)
sの[左[側|端]|先頭]から__Y__を取り除く
'''
s.rstrip()
'''
sの[右[側|端]|末尾]から[|不要な|余分な][空白|改行]を取り除く
sを右トリムする
'''
__X__ = sub
s.rstrip(__X__)
'''
@X(sub;'\t';'\n';' ')
@Y(sub;タブ;改行;空白)
sの[右[側|端]|末尾]から__Y__を取り除く
'''
文字列幅 = 10
s.zfill(文字列幅)
'''
@alt(ゼロ埋めする|パディングする)
sをゼロ埋めする
'''
str(n).zfill(文字列幅)
'''
[整数|数値]をゼロ埋めした文字列に変換する
'''
s.center(文字列幅)
'''
sを[センタリング|中央寄せ][に|]する
'''
s.ljust(文字列幅)
'''
sを左寄せ[に|]する
'''
s.rjust(文字列幅)
'''
sを右寄せ[に|]する
'''
s.capitalize()
'''
sをキャピタライズする
sの先頭だけ大文字化する
'''
s.swapcase()
'''
[sの|]大文字と小文字を[交換する|逆にする|入れ替える]
sのケースを[入れ替える|交換する|逆にする]
'''
aStringList = ['A', 'B', 'C']
s in aStringList
'''
@alt(含まれる|ある|存在する)
@prefix(aStringList;文字列リスト)
sがaStringListのいづれかどうか
sがaStringListに含まれるかどうか
'''
s = 'ABCDEFG'
sub = 'C'
start = 1
end = 3
sub in s
'''
部分文字列かどうか
{s中に|subが}含まれるかどうか
'''
sub not in s
'''
@alt(含まれない|ない|存在しない)
{s中に|subが}含まれないかどうか
'''
s.find(sub)
'''
{s中から|subを}[探す|見つける]
{sの先頭から|subを}[探す|見つける]
'''
s.find(sub, start) != -1
'''
@prefix(start;開始位置)
@prefix(end;終了位置)
{sのstart[以降に|より後に|先に]|subが}含まれるかどうか
'''
s.find(sub, start) == -1
'''
{sのstart[以降に|より後に|から先に]|subが}含まれないかどうか
'''
s.find(sub, 0, end) != -1
'''
{sのend[より前に|以前に]|subが}含まれるかどうか
'''
s.find(sub, 0, end) == -1
'''
{sのend[より前に|以前に]|subが}含まれないかどうか
'''
s.find(sub, start, end) != -1
'''
{sのstartとendの間に|subが}含まれるかどうか
{sのstartからとendの[間|範囲]に|subが}含まれるかどうか
'''
s.find(sub, start, end) == -1
'''
sのstart番目とend番目の間にsubが含まれないかどうか
{sのstart[|番目]からとend[|番目]までの[間|範囲]に|subが}含まれないかどうか
'''
s.find(sub, start)
'''
{sのstartから|subを}探す
'''
s.find(sub, 0, end)
'''
{sのendまで|subを}探す
'''
s.find(sub, start, end)
'''
subを範囲を指定して探す
{sのstartからendまで|subを}探す
'''
s.rfind(sub)
'''
{sの末尾から|subを}[探す|見つける]
'''
s.find(sub, start, end)
'''
subを範囲を指定して探す
{sの末尾から|範囲を指定してsubを}探す
'''
''.join(aStringList)
'''
@alt(連結する|結合する|つなげる|一つにする)
aStringListを連結する
aStringListを[連結して|]一つの文字列にする
'''
sep = ','
sep.join(aStringList)
'''
{aStringListを|sepを区切りとして}連結する
{aStringListを|sepを区切りにして}一つの文字列にする
'''
iterable = [1, 1, 2]
''.join(map(str, iterable))
'''
{iterableを|文字列[に変換し|とし]て}連結する
aStringListを[連結して|]一つの文字列にする
'''
__X__.join(map(str, iterable))
'''
@X(' ';',';'\t';'\n';sub)
@Y(空白;カンマ;タブ;'改行;部分文字列)
{iterableを|文字列[リスト|]に}変換して、__Y__で_連結する
'''
s.count(sub)
'''
@alt(カウントする|数える)
@alt(出現数=出現[|回数]|登場[|回数])
s中のsubの出現数[をカウントする|]
s中のsubをカウントする
s中にsubがいくつか含まれるか[調べる|カウントする]
'''
s.count(sub, start, end)
'''
@alt(までの範囲|の[範囲|間])
sのstartからendまでの範囲でsubの出現数[をカウントする|]
sのstartからendまでの範囲でsubをカウントする
sののstartからendまでの間にsubがいくつか含まれるか[調べる|カウントする]
'''
s.startswith(sub)
'''
@alt(接頭辞|先頭|プレフィックス|左[側|端])
@alt(始まる|開始する)
{subが|subで}始まるかどうか
sの接頭辞[が|は]subかどうか
'''
s.startswith(sub, start)
'''
{sのstart以降が|subで}始まるかどうか
'''
s.endswith(sub)
'''
@alt(接尾辞|末尾|サフィックス|右[側|端])
@alt(終わる|終了する)
{sが|subで}終わるかどうか
sの接尾辞[が|は]subかどうか
'''
s.removeprefix(sub)
'''
@alt(安全に|エラーなく)
{[|安全に]|sの接頭辞から|subを}取り除く
'''
s.removesuffix(sub)
'''
{[|安全に]|sの接尾辞から|subを}取り除く
'''
__X__ = '.csv'
filename.endswith(__X__)
'''
@X('.csv';'.txt';'.tsv';'.json')
@Y(CSV;テキスト;TSV;JSON)
@prefix(filename;ファイル名)
{filenameが|__Y__ファイル}かどうか
'''
s.isupper()
'''
sが[全て|]大文字かどうか
'''
s.islower()
'''
sが[全て|]小文字かどうか
'''
s.isdigit()
'''
sが[全て|]数字かどうか
'''
s.isalpha()
'''
sが[全て|]アルファベットかどうか
'''
s.isalnum()
'''
sが[全て|]英数字かどうか
'''
s.isascii()
'''
@alt(アスキー文字|ASCII文字)
sが[全て|]アスキー文字かどうか
'''
s.isspace()
'''
sが[全て|]空白[文字|][からなる|]かどうか
'''
s.isdecimal()
'''
s[は|が][全て|]十進数字かどうか
'''
s.isnumeric()
'''
s[は|が][全て|]数値かどうか
'''
any(c.__X__() for c in s)
'''
@alt(含まれる|ある)
@X(isupper|islower|isdigit|isalpha|isalnum|isspace|isascii)
@Y(大文字|小文字|数字|アルファベット|英数字|空白|アスキー文字)
{s内に|[ひとつでも|]__Y__が}含まれるかどうか
'''
any(not c.__X__() for c in s)
'''
{s中に|[ひとつでも|]非__Y__が}含まれるかどうか
'''
s.isidentifier()
'''
s[は|が][全て|]識別子名かどうか
'''
keyword.iskeyword(s)
'''
s[は|が][Pythonの|]キーワードかどうか
'''
s.isprintable()
'''
s[は|が][全て|]印字できるかどうか
'''
s.istitle()
'''
s[は|が]タイトルケースかどうか
'''
s.encode(encoding='utf-8', errors='strict')
'''
{sを|[UTF8で|]}バイト列に変換する
'''
s.encode(encoding='sjis', errors='ignore')
'''
{sを|SJISで}バイト列に変換する
'''
s.encode(encoding='unicode_escape')
'''
{sを|ユニコードエスケープで}バイト列に変換する
'''
encoding = 'utf-8'
s.encode(encoding=encoding)
'''
@prefix(encoding;[エンコーディング|文字コード])
{sを|encodingで_}バイト列に変換する
'''
s.encode(errors='ignore')
'''
{エラーを無視して|sを}バイト列に変換する
'''
args = []
formatText = ''
formatText.format(*args)
'''
@test(text='<{}>';$$)
@alt(フォーマットする|文字列整形する)
@prefix(formatText;[書式|テンプレート])
formatTextを{argsを|引数として}フォーマットする
'''
aDict = {'A': '1'}
formatText = '{A}'
formatText.format_map(aDict)
'''
@test(text='<{}>';mapping={};$$)
formatTextをaDictでフォーマットする
'''
len(s)
'''
sの[長さ|文字数|大きさ][|を得る]
'''
s[0]
'''
sの[先頭|最初][|の文字][|を得る]
'''
s[-1]
'''
sの[末尾|最後][|の文字][|を得る]
'''
s[n]
'''
sのn番目[|の文字][|を得る]
'''
s == s2
'''
2つの文字列[は|が][同じ|等しい]かどうか
'''
s != s2
'''
2つの文字列[は|が][等しく|同じで]ないかどうか
'''
s < s2
'''
{2つの文字列を|辞書順で}比較する
{s[が|は]s2より|辞書順で}前かどうか
'''
s > s2
'''
{s[が|は]s2より|辞書順で}後かどうか
'''
s.casefold() == s2.casefold()
'''
@alt(ケースを無視して|大文字小文字を無視して)
2つの文字列[が|は]ケースを無視して同じか
'''
s.casefold() < s2.casefold()
'''
2つの文字列をケースを無視して比較する
'''
# Tips
('ァ' <= ch <= 'ン')
'''
@alt(片仮名|カタカナ)
@alt(平仮名|ひらがな)
s[が|は]片仮名かどうか
'''
('ぁ' <= ch <= 'ん')
'''
s[が|は]平仮名かどうか
'''
('\u4E00' <= ch <= '\u9FD0')
'''
s[が|は]漢字かどうか
'''
re.search('[\u4E00-\u9FD0]', s)
'''
{s[|内|中]に|漢字が}[含まれる|使われている]かどうか
'''
re.search('[あ-んア-ン\u4E00-\u9FD0]', s)
'''
{s[|内|中]に|日本語が}[含まれる|使われている]かどうか
'''
''.join([chr(ord(ch) - 96) if ('ァ' <= ch <= 'ン') else ch for ch in s])
'''
[sの|]片仮名を平仮名に変換する
'''
''.join([chr(ord(ch) + 96) if ('ぁ' <= ch <= 'ん') else ch for ch in s])
'''
[sの|]平仮名を片仮名に変換する
'''
s.translate(str.maketrans('０１２３４５６７８９', '0123456789'))
'''
[sの|]全角数字を半角数字に変換する
'''
| python |
""" Functions for running the PEPR model defined in the
--Univeral visitation law of human mobility-- paper
(https://www.nature.com/articles/s41586-021-03480-9).
"""
import random
import time
import itertools as it
import matplotlib.pyplot as plt
import numpy as np
def levy_flight(num_steps: int, alpha: float) -> np.array:
"""
Performs a levy flight in 2D starting at the
origin (0,0).
Args:
        num_steps: number of steps in the flight
alpha: shape parameter in jump distribution
Returns:
x: np.array of x coordinates of trajectory
y: np.array of y coordinates of trajectory
"""
# Set up
x_start, y_start = 0,0
x,y = [x_start], [y_start]
x_curr, y_curr = x_start, y_start
# Execute trajectory
for i in range(num_steps-1):
r = np.random.pareto(alpha)
theta = np.random.uniform(0,2*np.pi)
x_jump, y_jump = r*np.cos(theta), r*np.sin(theta)
x_curr, y_curr = x_curr + x_jump, y_curr + y_jump
x.append(x_curr)
y.append(y_curr)
x = np.array(x)
y = np.array(y)
return x,y
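# Example (sketch): simulate and plot a single 500-step flight with alpha = 1.5.
#
#     x, y = levy_flight(num_steps=500, alpha=1.5)
#     plt.plot(x, y, lw=0.5)
#     plt.show()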
def levy_jump(x_curr: int, y_curr: int, alpha: float, box_size: float) -> [int, int]:
"""
Does a levy jump.
Args:
x_curr: current x coordinate
        y_curr: current y coordinate
alpha: shape parameter in jump distribution
box_size: size of grid box in which process is taking place
Returns:
x_curr: x coordinate after jump
y_curr: y coordinate after jump
"""
r = np.random.pareto(alpha)
theta = np.random.uniform(0,2*np.pi)
x_jump, y_jump = r*np.cos(theta) / box_size, r*np.sin(theta) / box_size
x_curr, y_curr = int(x_curr + x_jump), int(y_curr + y_jump)
return x_curr, y_curr
def revisit(visited_places: dict) -> [int, int, dict]:
""" Chooses a place to revist, where place i is chosen
with prob \propto S_i, S_i = number of visits to i
Args:
        visited_places[(x,y)] = number of visits to place (x,y)
Returns:
x_curr: new x coordinate
        y_curr: new y coordinate
visited_places: updated list of visited places
"""
freqs = np.array(list(visited_places.values()))
freqs = freqs / (1.0*sum(freqs))
places_indices = range(len(freqs))
go_to = np.random.choice(places_indices, p=freqs)
x_curr, y_curr = list(visited_places.keys())[go_to]
visited_places[(x_curr,y_curr)] += 1
return x_curr, y_curr, visited_places
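# Worked example of the preferential-return weights used above: with
# visited_places = {(0, 0): 3, (1, 1): 1} the visit counts are [3, 1], so
# (0, 0) is redrawn with probability 3/4 and (1, 1) with probability 1/4.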
def xy_to_cell_id(x: int, y: int, Ngrid: int):
""" Convert a position (x,y) to the grid cell
    index (where the upper left hand cell is indexed
    0 & indexing is done row-wise)
"""
return x + y*Ngrid
def cell_id_to_xy(cell_id: int, Ngrid: int) -> [int, int]:
""" The reverse of the above function """
y,x = divmod(cell_id, Ngrid)
return x,y
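# Round-trip sanity check of the row-wise indexing: with Ngrid = 4,
# xy_to_cell_id(1, 2, 4) == 1 + 2*4 == 9 and cell_id_to_xy(9, 4) == (1, 2).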
def dist(x1: int, y1: int, x2: int, y2: int) -> float:
""" L2 distance between points
(x1,y1) and (x2,y2)
"""
d = (x2-x1)**2 + (y2-y1)**2
return np.sqrt(d)
def update_data(x_curr: int, y_curr: int, home: int, data: dict, agent_id, Ngrid: int) -> dict:
"""
The data dictionary contains a tally of
all the visitors to a given cell, where
cells are index from 0, 1, 2, ... N_cells:
data[cell_id] = [agent_id, f, home, r, E]
So data[7] = [ [10, 2, 13, 45, 90], [2, 5, (3,3), 10, 100] ]
Means there were two visitors to cell 7.
The first visitor was agent 10, visited
the cell twice, has home at cell 13
which is a distance 45 from cell 7, and
has expended E = r*f = 45*2 = 90 travel
energy units in traveling to cell 7.
Args:
x_curr: x coordinate of current position of agent
y_curr: y coordinate of current position of agent
home: grid cell index of home of current agent
data: defined above
agent_id: ID of agent
Returns:
data: updated data dictionary
"""
f = 1 # we know it's a new place
x_home, y_home = cell_id_to_xy(home, Ngrid)
r = dist(x_curr, y_curr, x_home, y_home)
key = xy_to_cell_id(x_curr, y_curr, Ngrid)
val = [agent_id, f, home, r, r*f]
# If first visit to cell update
if key not in data:
data[key] = [val]
# If not, then grab all agent
    # feature vectors at the cell
# and update the given agent ID's
    # feature vector; by feature vector
# I mean [agent_id, f, home, r, E]
else:
rows = data[key]
for i,row in enumerate(rows):
if row[0] == agent_id:
[agent_id, f, home, r, E] = row
new_row = [agent_id, f+1, home, r, r*(f+1)]
data[key][i] = new_row
return data
data[key].append(val)
return data
def add_to_visited_places(x_curr: int, y_curr: int, visited_places: dict) -> dict:
"""
    visited_places[(x,y)] contains the number of visits
    to the cell (x,y). This updates the count for the
    current position (x_curr, y_curr)
Args:
x_curr: x coordinate of current position
y_curr: y coordinate of current position
        visited_places: dict defined above
Returns:
        visited_places: updated dict
"""
if (x_curr, y_curr) not in visited_places:
visited_places[(x_curr, y_curr)] = 1
else:
visited_places[(x_curr, y_curr)] += 1
return visited_places
def merge(v1: dict, v2: dict) -> dict:
""" Merges two dictionaries """
for key in v2:
if key not in v1:
v1[key] = v2[key]
else:
v1[key] += v2[key]
return v1
def levy_jump_with_PE(x_curr: int, y_curr: int, alpha: float, R: float, nu: float, box_size: int, data: dict, Ngrid: int):
""" Does a levy flight, except now the
angle is chosen according to Preferential
Exploration.
Args:
x_curr: current x coordinate
        y_curr: current y coordinate
        alpha: shape parameter in jump distribution
        R: sensing radius (see definition of Preferential exploration)
        nu: asymmetry parameter (see definition of Preferential exploration)
        box_size: size of grid box in which process is taking place
        data: data[cell_id] = [ f_agent1, f_agent2 ] contains list of
            feature vectors of agents that have visited that cell
            where f_agent1 = [agent_id, f - frequency of visit, home cell, r - distance from home cell to cell, r*f]
        Ngrid: number of grid cells per side
Returns:
x_curr: x coordinate after jump
y_curr: y coordinate after jump
"""
r = np.random.pareto(alpha)
theta = sample_angle(x_curr, y_curr, data, R, nu, Ngrid)
x_jump, y_jump = r*np.cos(theta) / box_size, r*np.sin(theta) / box_size
x_curr, y_curr = int(x_curr + x_jump), int(y_curr + y_jump)
return x_curr, y_curr
def find_neighbours(x_curr: int, y_curr: int, R: int, Ngrid: int) -> [(int, int), (int,int)]:
""" Return all neighbours on a grid
in the first R layers
So if R = 1, then you return
the eight neighbours surrounding
a given cell
Auxiliary function for 'sample_angle'
method defined below
"""
neighbours = [(x_curr + col, y_curr + row) for row in range(-R,R+1) for col in range(-R,R+1) \
if 0 <= x_curr + col <= Ngrid-1 and 0 <= y_curr + row <= Ngrid-1 ]
if len(neighbours) > 0:
neighbours.remove((x_curr, y_curr))
return neighbours
def get_energies(neighbours: list, data: dict, Ngrid: int) -> list:
"""
Grabs all the energies of the neighbour cells
    Auxiliary function for 'sample_angle' method below
"""
Es = np.ones(len(neighbours))
for i,n in enumerate(neighbours):
key = xy_to_cell_id(n[0], n[1], Ngrid)
E = 0
if key not in data:
Es[i] = E
else:
for row in data[key]:
E += row[-1]
Es[i] += E
return Es
def sample_angle(x_curr: int, y_curr: int, data: dict, R: int, nu: float, Ngrid: int) -> float :
"""
An agent following preferential exploration jumps a distance r
in a direction theta, where
r ~ Pareto(alpha) (just like a levy flight)
theta ~ E(theta;R)^nu (see paper)
    where E is the aggregate energy of all
cells within a distance R (see paper)
This method samples the angle theta
"""
if R == 0:
return np.random.uniform(0,2*np.pi)
# Find which neighbour to jump to
neighbours = find_neighbours(x_curr, y_curr,R, Ngrid)
energies = get_energies(neighbours, data, Ngrid)
energies += np.ones(len(energies))
energies = energies**nu
if sum(energies) == 0:
index_of_chosen_neighbour = np.random.choice(range(len(neighbours)))
else:
energies /= sum(energies)
index_of_chosen_neighbour = np.random.choice(range(len(neighbours)), p = energies)
# Convert this to a jump angle
x1,y1 = x_curr, y_curr
(x2,y2) = neighbours[index_of_chosen_neighbour]
angle = find_angle(x1,y1,x2,y2)
# I need to fill in the missing angles here
# Now I want the final angle to be Uniform(angle-X, angle+X)
# where X is the nearest angle.
angle_to_neighbours = [abs(find_angle(x1,y1,x2,y2) - angle) for (x2,y2) in neighbours if (x2,y2) != (x1,y1)]
angle_to_neighbours = [x for x in angle_to_neighbours if x != 0]
X = min(angle_to_neighbours)
angle_final = np.random.uniform(angle-X,angle+X)
return angle_final
def find_angle(x1:int, y1: int, x2: int, y2: int) -> float:
"""
    Finds the angle between the two points
(x1, y1) and (x2, y2)
"""
# Find angle
dx, dy = x2-x1, y2-y1
r = np.sqrt( dx**2 + dy**2 )
angle = np.arccos(dx / r)
    # Find quadrant
if dy < 0:
angle = np.pi + angle
return angle
def clean_Es(Es):
""" Auxiliary method """
return [ x for x in Es if x != 0 ]
def preferential_exploration(num_steps: int, data: dict, alpha: float, rho: float, gamma: float, R: int, nu: float, x_curr: int, y_curr: int, agent_id: int, Ngrid: int, box_size: int) -> dict:
"""
    Performs preferential exploration for a single agent. See paper for definition
of the process.
Args:
num_steps: number of steps in simulation
data: data[cell_id] = [ f_agent1, f_agent2 ] contains list of
feature vectors of agents that have visited that cell
where the 'feature vector' is
f_agent1 = [agent_id, f - frequency of visit, home cell , r-distance from home cell to cell , r*f]
        R: Sensing radius (see definition of Preferential exploration)
nu: Model parameter (see definition of Preferential exploration)
x_curr: x coordinate of current position
        y_curr: y coordinate of current position
agent_id: ID of agent doing the PERP
Ngrid: as implied
box_size: as implied (size of grid cell)
Returns:
data: updated with trajectory of walker
"""
# Update the data dictionary
home = xy_to_cell_id(x_curr,y_curr,Ngrid)
f, r, E = 1,0,0
val = [agent_id, f, home, r, E]
if home not in data:
data[home] = [val]
else:
data[home].append(val)
# Set up the dict of visited places
# Need this for the preferential
# return part of the mechanism
visited_places = {} # {(x,y):freq}
visited_places[(x_curr,y_curr)] = 1
# Do walk
for i in range(num_steps-1):
# Find odds of exploring new location
num_visited = len(visited_places)
prob_new = rho*num_visited**(-gamma) # defined in the Song model
temp = np.random.rand()
# Go to new location
if temp <= prob_new:
x_curr, y_curr = levy_jump_with_PE(x_curr, y_curr, alpha, R, nu, box_size, data, Ngrid)
#If jump has taken you outside the box, stop
if x_curr < 0 or x_curr >= Ngrid or y_curr < 0 or y_curr >= Ngrid:
break
visited_places = add_to_visited_places(x_curr, y_curr, visited_places)
data = update_data(x_curr, y_curr, home, data, agent_id, Ngrid)
# Return to previously visited location
else:
x_curr, y_curr, visited_places = revisit(visited_places)
cell_id = xy_to_cell_id(x_curr, y_curr, Ngrid)
list_of_agents = data[cell_id]
# find index of agent
for j in range(len(list_of_agents)):
if list_of_agents[j][0] == agent_id:
break
# then update that list
[agent_id, f, home, r, E] = list_of_agents[j]
new_row = [agent_id, f+1, home, r, r*(f+1)]
data[cell_id][j] = new_row
# walk is done and data has been updated
# so just return
return data
def spatial_plot(data: dict, homes: list, Ngrid: int) -> None:
""" This plots various quantities at each
cell in an (x,y) grid
    1) The total visitation (number of visits to a given cell)
2) The effective travel distance per visitor to that cell
3) Home locations
This method is used after the main simulation
is run. It plots the data collected.
Args:
data: defined in above method
        homes: list of home (x, y) positions of agents
        Ngrid: number of grid cells per side
Returns:
Plots a Figure inline
"""
V, E = np.zeros((Ngrid, Ngrid)), np.zeros((Ngrid, Ngrid)),
for key in data.keys():
#Find visitation
vals = data[key]
x,y = cell_id_to_xy(key,Ngrid)
visitation = len(vals)
V[x][y] = visitation
#Find energy
Es = []
for agent_ID, f_temp, home, r, E1 in vals:
Es.append(E1)
E_mean = np.mean(Es)
E[x][y] = E_mean
#Homes
H = np.zeros((Ngrid, Ngrid))
for x,y in homes:
H[x][y] += 1
plt.figure(figsize=(12,6))
ax1 = plt.subplot(131)
plt.imshow(V)
plt.title('Visitation')
ax2 = plt.subplot(132)
plt.imshow(E)
plt.title('Effective travel distance per visitor')
ax3 = plt.subplot(133)
plt.imshow(H)
plt.title('Homes')
return
| python |
# -*- coding: utf-8 -*-
from authomatic import providers
class MozillaPersona(providers.AuthenticationProvider):
pass
| python |
import requests
from kong.api import API
class Connection:
def __init__(self, url='http://localhost:8001'):
self.url = url
def _get(self, path='', **request_args):
return requests.get(self.url + path, **request_args)
def _post(self, path='', **request_args):
return requests.post(self.url + path, **request_args)
def _patch(self, path='', **request_args):
return requests.patch(self.url + path, **request_args)
def _delete(self, path='', **request_args):
return requests.delete(self.url + path, **request_args)
class KongConnection(Connection):
def _get_apis(self, response):
data = response.get('data', [])
return [self._get_api(**attributes) for attributes in data]
def get_apis(self):
response = self._get('/apis').json()
return self._get_apis(response)
def _get_api(self, **attributes):
return API(is_new=False, **attributes)
def get_api(self, id=''):
attributes = self._get('/apis/' + id).json()
return self._get_api(**attributes)
def create_api(self, api):
response = self._post('/apis', json=api.attributes).json()
print(response, api.attributes)
api.update_attributes(**response)
return api
def update_api(self, api):
print('will update')
response = self._patch('/apis/' + api.attributes.get('id', ''), json=api.attributes).json()
print(response)
api.update_attributes(**response)
return api
def delete_api(self, api):
self._delete('/apis/' + api.attributes.get('id', ''))
def sync_apis(self, apis):
online_apis = {api.attributes.get('name'): api for api in self.get_apis()}
for api in online_apis:
if api in apis:
# only update if there is a change in attributes
if online_apis[api].difference(apis[api]):
online_apis[api].update_attributes(**apis[api].attributes)
online_apis[api].commit(self)
# flag that we do not want to create this
del apis[api]
else:
online_apis[api].delete(self)
for api in apis:
print(api, apis[api].attributes)
apis[api].commit(self)
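# A minimal usage sketch (assumes a Kong admin API on localhost:8001 and that
# kong.api.API accepts these attributes; adjust to your own gateway setup):
#
#     conn = KongConnection('http://localhost:8001')
#     api = API(name='mockbin', upstream_url='http://mockbin.org', request_host='mockbin.example')
#     conn.create_api(api)
#     print([a.attributes.get('name') for a in conn.get_apis()])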
| python |
from discoPy.rest.base_request.base_request import BaseRequestAPI
class StageData(BaseRequestAPI):
'''Contains a collection of stage related methods.'''
def __init__(self, token: str, url: str=None):
super().__init__(token, url)
def create_stage_instance(self, channel_id, topic: str, privacy_level: int=None) -> dict:
'''https://discord.com/developers/docs/resources/stage-instance#create-stage-instance'''
payload: dict = {
'channel_id': channel_id,
'topic': topic,
'privacy_level': privacy_level
}
payload: dict = {k:v for k,v in payload.items() if v is not None}
return self._request('POST', params=payload, uri=f'/stage-instances')
def get_stage_instance(self, channel_id) -> dict:
'''https://discord.com/developers/docs/resources/stage-instance#get-stage-instance'''
return self._request('GET', uri=f'/stage-instances/{channel_id}')
    def modify_stage_instance(self, channel_id, topic: str=None, privacy_level=None) -> dict:
'''https://discord.com/developers/docs/resources/stage-instance#modify-stage-instance'''
payload: dict = { 'topic': topic, 'privacy_level': privacy_level }
payload: dict = {k:v for k,v in payload.items() if v is not None}
return self._request('PATCH', params=payload, uri=f'/stage-instances/{channel_id}')
    def delete_stage_instance(self, channel_id) -> dict:
'''https://discord.com/developers/docs/resources/stage-instance#delete-stage-instance'''
return self._request('DELETE', uri=f'/stage-instances/{channel_id}') | python |
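# A minimal usage sketch (hypothetical token and channel id; the bot needs the
# MANAGE_CHANNELS permission on the target stage channel):
#
#     stage = StageData(token="BOT_TOKEN")
#     stage.create_stage_instance(channel_id=1234567890, topic="Community Q&A")
#     stage.modify_stage_instance(channel_id=1234567890, topic="Wrap-up")
#     stage.delete_stage_instance(channel_id=1234567890)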
#!/usr/bin/env python3
# Python fizzbuzz implementation
for num in range(1, 25):
#check if number is divisible by both 3 and 5
if num % 3 == 0 and num % 5 == 0:
print("FizzBuzz")
#check if number divisible by 3
elif num % 3 == 0:
print("Fizz")
# check if number is divisible by 5
elif num % 5 == 0:
print("Buzz")
else:
# return number not divisible by 3 or 5
print(num)
| python |
import re
from dataclasses import dataclass
from typing import List, Tuple, Optional
import attr
from new.data_aggregation.utils import MethodSignature
@dataclass(frozen=True)
class Scope:
name: Optional[str] = None # attr.attrib()
bounds: Optional[Tuple[int, int]] = None # attr.attrib()
type: Optional[str] = None # attr.attrib()
class AST:
def __init__(self, children: List['AST'] = None):
self.label = None
self.children = children or []
self.bounds: Optional[Tuple[int, int]] = None
self.type = ''
def __repr__(self):
s = 'Tree(label = ' + self.label + ', bounds=' + str(self.bounds) + ', type=' + self.type + ', '
for child in self.children:
s += repr(child) + ', '
s += ")"
return s
def get_method_names_and_bounds(self):
paths = self.paths(self, self.label + '::', [])
method_names_and_bounds = dict()
for scope in paths:
if scope.type in ['method', 'constructor', 'static_init']:
method_names_and_bounds[MethodSignature(scope.name, scope.type)] = scope
return method_names_and_bounds
def paths(
self,
node: 'AST',
prefix: str,
paths: List[Scope]
) -> List[List[Scope]]:
for child in node.children:
if child.type != 'code':
paths.extend(self.paths(child, prefix + child.label + '::', [Scope(name=prefix + '::' + child.label, bounds=child.bounds, type=child.type)]))
return paths
class Parser:
kotlin_patterns = {
'pattern_method_name': re.compile(
'(?:override|internal|public|private|protected|static|final|native|synchronized|abstract|transient)* *(fun)+[$_\w<>\[\]\s]*\s+[\$_\w]+\([^\)]*\)?\s*?'),
'pattern_constructor_name': re.compile("(init|constructor)+ *(\([^\)]*\))?"),
'pattern_class': re.compile("(?:open|public|protected|private|static|data)? *(?:class|object|interface)\s+\w+"),
'pattern_static': re.compile("(companion object ) *\{")}
java_patterns = {
'pattern_method_name': re.compile(
'(?:(?:public|private|protected|static|final|native|synchronized|abstract|transient)+\s+)+[_@\w<>\[\]\s+,\?]*[\$_\w]+\([^\)]*\)?\s*'),
'pattern_constructor_name': re.compile("(?:public|protected|private|static) *\w+\([^\)]*\)+"),
'pattern_class': re.compile("(?:public|protected|private|static)? *(abstract +)?(?:class|interface)\s+\w+"),
'pattern_static': re.compile("(static)\s+\{")}
declaration_patterns = []
def __init__(self, language: str = 'java'):
self.brackets_positions = []
self.labels = []
self.declaration_types = ['method', 'constructor', 'class', 'static_init']
self.brackets_positions = []
if language == 'java':
self.declaration_patterns = self.java_patterns.values()
elif language == 'kotlin':
self.declaration_patterns = self.kotlin_patterns.values()
def parse(self, txt: str, filename: str = ''):
self.brackets_positions.clear()
self.labels.clear()
self.brackets_positions.append((-1, 'start'))
try:
self.recursive_parsing(txt)
except Exception:
return self.create_node()
self.find_declarations(txt)
self.fill_spaces()
ast, _ = self.construct_ast(curr_position=0)
ast.label = filename
ast.type = 'file'
ast.bounds = (0, ast.children[0].bounds[1] if ast.children else 0)
return ast
@staticmethod
def create_node(label=('', '')) -> AST:
root = AST(children=[])
root.label = label[0]
root.type = label[1]
return root
def construct_ast(self, label: Tuple[str, str] = ('', ''), pos: int = 0, curr_position: int = 0) -> Tuple[AST, int]:
root = self.create_node(label)
for i, val in enumerate(self.brackets_positions[1:]):
if i < curr_position:
continue
pos_end, bracket = val
if bracket == '{':
child_label = self.labels[curr_position]
curr_position += 1
if child_label[1] in self.declaration_types:
child, curr_position = self.construct_ast(child_label, pos_end, curr_position)
else:
child, curr_position = self.construct_ast(('code', 'code'), pos_end, curr_position)
root.children.append(child)
else:
curr_position += 1
root.bounds = (pos, pos_end)
return root, curr_position
return root, -1
def recursive_parsing(self, txt: str, pos: int = 0):
next_pos = 0
for i, char in enumerate(txt[pos:], pos):
if i <= next_pos:
continue
if char == '{':
self.brackets_positions.append((i, '{'))
pos = i + 1
next_pos = self.recursive_parsing(txt, pos)
if char == '}':
self.brackets_positions.append((i, '}'))
return i
def fill_spaces(self):
j = 0
for i in range(1, len(self.brackets_positions)):
if j < len(self.labels) and self.labels[j][2] <= self.brackets_positions[i][0]:
j += 1
continue
self.labels.insert(j, ('code', 'code', self.brackets_positions[0]))
j += 1
def find_declarations(self, code: str):
all_declarations = []
for declaration_pattern, type in zip(self.declaration_patterns, self.declaration_types):
declarations = self.find_declarations_by_pattern(declaration_pattern, code, type)
if declarations:
if type == 'static_init':
declarations = [('static', type, info[2] - (len(info[0]) - len('static'))) for info in declarations]
all_declarations.extend(declarations)
all_declarations.sort(key=lambda x: x[2])
self.labels = all_declarations # TODO: return it w/o state
@staticmethod
def find_declarations_by_pattern(pattern: re.Pattern, code: str, declaration_type: str) -> List[
Tuple[str, str, int]]:
declarations = [(m.group(0), declaration_type, m.end(0)) for m in re.finditer(pattern, code)]
if declaration_type == "method":
declarations = [(i[0].split('(')[0], i[1], i[2]) for i in declarations]
return declarations
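# A minimal usage sketch (illustrative Java snippet; bounds are character
# offsets of the matching braces):
#
#     code = "public class Foo { public void bar() { int x = 0; } }"
#     ast = Parser(language='java').parse(code, filename='Foo.java')
#     for signature, scope in ast.get_method_names_and_bounds().items():
#         print(signature, scope.bounds)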
| python |
# test function projCappedSimplex
def projCappedSimplex():
import numpy as np
from limetr.utils import projCappedSimplex
ok = True
# setup test problem
# -------------------------------------------------------------------------
w = np.ones(10)
sum_w = 9.0
tr_w = np.repeat(0.9, 10)
my_w = projCappedSimplex(w, sum_w)
tol = 1e-10
err = np.linalg.norm(tr_w - my_w)
ok = ok and err < tol
if not ok:
print('tr_w', tr_w)
print('my_w', my_w)
return ok
| python |
# Author: Mikita Sazanovich
import argparse
import itertools
import os
import sys
import numpy as np
import tensorflow as tf
sys.path.append('../')
from deepq import StatePotentialRewardShaper, Estimator, StatePreprocessor, PrioritizedReplayBuffer
from deepq import get_last_episode
from dotaenv import DotaEnvironment
from dotaenv.codes import STATE_DIM, ACTIONS_TOTAL
def copy_model_parameters(sess, estimator1, estimator2):
"""
Copies the model parameters of one estimator to another.
Args:
sess: Tensorflow session instance
estimator1: Estimator to copy the parameters from
estimator2: Estimator to copy the parameters to
"""
e1_params = [t for t in tf.trainable_variables() if t.name.startswith(estimator1.scope)]
e1_params = sorted(e1_params, key=lambda v: v.name)
e2_params = [t for t in tf.trainable_variables() if t.name.startswith(estimator2.scope)]
e2_params = sorted(e2_params, key=lambda v: v.name)
update_ops = []
for e1_v, e2_v in zip(e1_params, e2_params):
op = e2_v.assign(e1_v)
update_ops.append(op)
sess.run(update_ops)
def make_epsilon_greedy_policy(estimator, acts):
"""
Creates an epsilon-greedy policy based on a given Q-function approximator and epsilon.
Args:
estimator: An estimator that returns q values for a given state
acts: Number of actions in the environment.
Returns:
A function that takes the (sess, state, epsilon) as an argument and returns
the probabilities for each action in the form of a numpy array of length nA.
"""
def policy_fn(sess, state, epsilon):
A = np.ones(acts, dtype=float) * epsilon / acts
q_values = estimator.predict(sess, np.expand_dims(state, 0))[0]
best_action = np.argmax(q_values)
A[best_action] += (1.0 - epsilon)
return A
return policy_fn
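# For example, with acts=4 and epsilon=0.2 the exploration mass is split as
# 0.2/4 = 0.05 per action and the greedy action additionally receives
# 1 - 0.2 = 0.8, giving probabilities like [0.85, 0.05, 0.05, 0.05].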
def populate_replay_buffer(replay_buffer, action_sampler, env):
print("Populating replay memory...")
state = env.reset()
state = StatePreprocessor.process(state)
done = False
for t in itertools.count():
if done or len(state) != STATE_DIM:
break
action_probs = action_sampler(state)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
print("Step {step} state: {state}, action: {action}.".format(step=t, state=state, action=action))
next_state, reward, done, _ = env.step(action=action)
next_state = StatePreprocessor.process(next_state)
replay_buffer.push(state, action, next_state, done, reward)
state = next_state
def deep_q_learning(sess,
env,
q_estimator,
target_estimator,
num_steps,
experiment_dir,
replay_memory_size=5000,
update_target_estimator_every=500,
discount_factor=0.999,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_steps=10000,
update_q_values_every=4,
batch_size=32,
restore=True):
# Create directories for checkpoints and summaries
checkpoint_dir = os.path.join(experiment_dir, "checkpoints")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_path = os.path.join(checkpoint_dir, "model")
reward_dir = os.path.join(experiment_dir, "rewards")
if not os.path.exists(reward_dir):
os.makedirs(reward_dir)
reward_writer = tf.summary.FileWriter(reward_dir)
starting_episode = 0
saver = tf.train.Saver()
if restore:
starting_episode = get_last_episode(reward_dir)
# Load a previous checkpoint if we find one
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
if latest_checkpoint:
print("Loading model checkpoint {}...\n".format(latest_checkpoint))
saver.restore(sess, latest_checkpoint)
total_t = sess.run(tf.train.get_global_step())
# The epsilon decay schedule
epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)
reward_shaper = StatePotentialRewardShaper('replays/')
reward_shaper.load()
replay_buffer = PrioritizedReplayBuffer(
replay_memory_size=replay_memory_size,
total_steps=num_steps,
reward_shaper=reward_shaper,
discount_factor=discount_factor,
save_dir=experiment_dir)
# The policy we're following
policy = make_epsilon_greedy_policy(q_estimator, ACTIONS_TOTAL)
# Populate the replay memory with initial experience
action_sampler = lambda state: policy(sess, state, epsilons[min(total_t, epsilon_decay_steps-1)])
populate_replay_buffer(replay_buffer, action_sampler, env)
print('Training is starting...')
# Training the agent
for i_episode in itertools.count(starting_episode):
episode_reward = 0
multiplier = 1
# Save the current checkpoint
saver.save(tf.get_default_session(), checkpoint_path)
# Reset the environment
state = env.reset()
state = StatePreprocessor.process(state)
done = False
# One step in the environment
for t in itertools.count():
if total_t >= num_steps:
return
eps = epsilons[min(total_t, epsilon_decay_steps-1)]
if done or len(state) != STATE_DIM:
print("Finished episode with reward", episode_reward)
summary = tf.Summary(value=[tf.Summary.Value(tag="rewards", simple_value=episode_reward)])
reward_writer.add_summary(summary, i_episode)
summary = tf.Summary(value=[tf.Summary.Value(tag="eps", simple_value=eps)])
reward_writer.add_summary(summary, i_episode)
break
# Maybe update the target estimator
if total_t % update_target_estimator_every == 0:
copy_model_parameters(sess, q_estimator, target_estimator)
print("\nCopied model parameters to target network.")
print('State potential:', reward_shaper.get_state_potential(state))
# Take a step
action_probs = policy(sess, state, eps)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
print("state: {state}, action: {action}.".format(state=state, action=action))
next_state, reward, done, _ = env.step(action=action)
next_state = StatePreprocessor.process(next_state)
episode_reward += reward * multiplier
multiplier *= discount_factor
# Save transition to replay memory
replay_buffer.push(state, action, next_state, done, reward)
if total_t % update_q_values_every == 0:
# Sample a minibatch from the replay memory
samples, weights, idx = replay_buffer.sample(batch_size, total_t)
states, actions, next_states, dones, rewards, _ = map(np.array, zip(*samples))
not_dones = np.invert(dones).astype(np.float32)
# Calculate q values and targets (Double DQN)
next_q_values = q_estimator.predict(sess, next_states)
best_actions = np.argmax(next_q_values, axis=1)
next_q_values_target = target_estimator.predict(sess, next_states)
targets = (
rewards +
discount_factor * not_dones * next_q_values_target[np.arange(batch_size), best_actions])
# Perform gradient descent update
predictions = q_estimator.update(sess, states, actions, targets, weights)
# Update transition priorities
deltas = np.abs(predictions - targets)
replay_buffer.update_priorities(idx, deltas)
print("\rStep {}, episode {} ({}/{})".format(t, i_episode, total_t, num_steps), end="\t")
sys.stdout.flush()
state = next_state
total_t += 1
def main():
parser = argparse.ArgumentParser(description='Trains the agent by DQN')
parser.add_argument('experiment', help='specifies the experiment name')
args = parser.parse_args()
env = DotaEnvironment()
# Where we save our checkpoints and graphs
experiment_dir = os.path.join(os.path.abspath("./experiments/"), args.experiment)
tf.reset_default_graph()
# Create a global step variable
global_step = tf.Variable(0, name="global_step", trainable=False)
# Create estimators
q_estimator = Estimator(
STATE_DIM,
ACTIONS_TOTAL,
scope="q",
summaries_dir=experiment_dir)
target_estimator = Estimator(
STATE_DIM,
ACTIONS_TOTAL,
scope="target_q")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
deep_q_learning(
sess=sess,
env=env,
q_estimator=q_estimator,
target_estimator=target_estimator,
experiment_dir=experiment_dir,
num_steps=500000,
replay_memory_size=10000,
epsilon_decay_steps=100000,
epsilon_start=0.5,
epsilon_end=0.1,
update_target_estimator_every=1000,
update_q_values_every=4,
batch_size=32,
restore=False)
env.close()
if __name__ == "__main__":
main()
| python |
from pptx import Presentation
from pptx.chart.data import CategoryChartData
from pptx.enum.chart import XL_CHART_TYPE
from pptx.util import Inches
class Present():
def test_pptx(self):
# create presentation with 1 slide ------
prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[5])
# define chart data ---------------------
chart_data = CategoryChartData()
chart_data.categories = ['East', 'West', 'Midwest']
chart_data.add_series('Series 1', (19.2, 21.4, 16.7))
# add chart to slide --------------------
x, y, cx, cy = Inches(2), Inches(2), Inches(6), Inches(4.5)
slide.shapes.add_chart(
XL_CHART_TYPE.COLUMN_CLUSTERED, x, y, cx, cy, chart_data
)
prs.save('../presentation-tests/chart-01.pptx')
def test_pptx02(self):
# create presentation with 1 slide ------
prs = Presentation("../presentation-tests/AIA Dallas 5Apr18_STD209.pptx")
text_runs = []
for indx, slide in enumerate(prs.slides):
for shape in slide.shapes:
if not shape.has_text_frame:
continue
for paragraph in shape.text_frame.paragraphs:
for run in paragraph.runs:
if indx == 21:
text_runs.append("{}-{}".format(indx, run.text))
run.text = run.text + "-modified"
print(text_runs)
prs.save('../presentation-tests/AIA Dallas 5Apr18_STD209-modified.pptx')
def test_pptx03(self):
# create presentation with 1 slide ------
prs = Presentation("../presentation-tests/AIA Dallas 5Apr18_STD209.pptx")
text_runs = []
for slide in prs.slides:
if not slide.has_notes_slide:
notes_slide = slide.notes_slide
text_frame = notes_slide.notes_text_frame
text_frame.text = 'new notes'
else:
notes_slide = slide.notes_slide
text_frame = notes_slide.notes_text_frame
text_frame.text = 'next text on existing notes slide'
for slide in prs.slides:
notes_slide = slide.notes_slide
text_frame = notes_slide.notes_text_frame
p = text_frame.add_paragraph()
p.text = 'next line added'
prs.save('../presentation-tests/AIA Dallas 5Apr18_STD209-addedNotes.pptx')
| python |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 25 13:08:47 2019
@author: Mario
"""
import subprocess, sys
if __name__ == "__main__":
#########################################
###### DISTRIBUTED COMPUTING SETUP ######
#########################################
rescanNetwork = True
if rescanNetwork:
ret = subprocess.call([sys.executable, r'networkscanner/GUI__DistributedComputingFindWorkers.py'])
if ret:
print(ret)
sys.exit('\n\nSomething went wrong. Check if file "GUI__DistributedComputingFindWorkers.py" is located in working directory.')
| python |
import argparse
import configparser
import json
import os
import sys
import time
import requests
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from util import logger
class ConversationScraper:
"""Scraper that retrieves, process and stores all messages belonging to a specific Facebook conversation"""
REQUEST_WAIT = 10
ERROR_WAIT = 30
CONVERSATION_ENDMARK = "end_of_history"
def __init__(self, convID, cookie, fb_dtsg, outDir):
self._directory = os.path.join(outDir,str(convID))
self._convID = convID
self._cookie = cookie
self._fb_dtsg = fb_dtsg
"""
POST Request full form data
(<ids_type> is "thread_fbids" for group conversations, "user_ids" otherwise)
"messages[<ids_type>][][offset]": "",
"messages[<ids_type>][][timestamp]": "",
"messages[<ids_type>][][]": "",
"client": "",
"__user": "",
"__a": "",
"__dyn": "",
"__req": "",
"fb_dtsg": "",
"ttstamp": "",
"__rev": ""
"""
def generateRequestData(self, offset, timestamp, chunkSize, isGroupConversation=False):
"""Generate the data for the POST request.
:return: the generated data
"""
ids_type = "thread_fbids" if isGroupConversation else "user_ids"
dataForm = {"messages[{}][{}][offset]".format(ids_type, self._convID) : str(offset),
"messages[{}][{}][timestamp]".format(ids_type, self._convID): timestamp,
"messages[{}][{}][limit]".format(ids_type, self._convID): str(chunkSize),
"client": "web_messenger",
"__a": "",
"__dyn": "",
"__req": "",
"fb_dtsg": self._fb_dtsg}
return dataForm
"""
POST Request all header:
"Host": "www.facebook.com",
"Origin": "https://www.facebook.com",
"Referer": "https://www.facebook.com",
"accept-encoding": "gzip,deflate",
"accept-language": "en-US,en;q=0.8",
"cookie": "",
"pragma": "no-cache",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.122 Safari/537.36",
"content-type": "application/x-www-form-urlencoded",
"accept": "*/*",
"cache-control": "no-cache"
"""
def executeRequest(self, requestData):
"""Executes the POST request and retrieves the correspondent response content.
Request headers are generated here
:return: the response content
"""
headers = {"Host": "www.facebook.com",
"Origin":"https://www.facebook.com",
"Referer":"https://www.facebook.com",
"accept-encoding": "gzip,deflate",
"accept-language": "en-US,en;q=0.8",
"cookie": self._cookie,
"pragma": "no-cache",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.122 Safari/537.36",
"content-type": "application/x-www-form-urlencoded",
"accept": "*/*",
"cache-control": "no-cache"}
url = "https://www.facebook.com/ajax/mercury/thread_info.php"
start = time.time()
response = requests.post(url, data=requestData, headers=headers)
end = time.time()
logger.info("Retrieved in {0:.2f}s".format(end-start))
#Remove additional leading characters
msgsData = response.text[9:]
return msgsData
def writeMessages(self, messages):
with open(os.path.join(self._directory,"conversation.json"), 'w') as conv:
conv.write(json.dumps(messages))
command = "python -mjson.tool " + os.path.join(self._directory, "conversation.json") + " > " + os.path.join(self._directory, "conversation.pretty.json")
os.system(command)
def scrapeConversation(self, merge, offset, timestampOffset, chunkSize, limit, isGroupConversation):
"""Retrieves conversation messages and stores them in a JSON file
If merge is specified, the new messages will be merged with the previous version of the conversation, if present
"""
if merge:
if not os.path.exists(os.path.join(self._directory,"conversation.json")):
logger.error("Conversation not present. Merge operation not possible")
return
with open(os.path.join(self._directory,"conversation.json")) as conv:
convMessages = json.load(conv)
numMergedMsgs = 0
if not os.path.exists(self._directory):
os.makedirs(self._directory)
logger.info("Starting scraping of conversation {}".format(self._convID))
messages = []
msgsData = ""
timestamp = "" if timestampOffset == 0 else str(timestampOffset)
while self.CONVERSATION_ENDMARK not in msgsData:
requestChunkSize = chunkSize if limit <= 0 else min(chunkSize, limit-len(messages))
reqData = self.generateRequestData(offset, timestamp, requestChunkSize, isGroupConversation)
logger.info("Retrieving messages {}-{}".format(offset, requestChunkSize+offset))
msgsData = self.executeRequest(reqData)
jsonData = json.loads(msgsData)
if jsonData and ('payload' in jsonData) and jsonData['payload']:
if ('actions' in jsonData['payload']) and jsonData['payload']['actions']:
actions = jsonData['payload']['actions']
#case when the last message already present in the conversation
                    #is newer than the first one of the currently retrieved chunk
if merge and convMessages[-1]["timestamp"] > actions[0]["timestamp"]:
for i, action in enumerate(actions):
if convMessages[-1]["timestamp"] == actions[i]["timestamp"]:
numMergedMsgs = len(actions[i+1:-1]) + len(messages)
messages = convMessages + actions[i+1:-1] + messages
break
break
#We retrieve one message two times, as the first one of the previous chunk
                    #and as the last one of the new one. So here we remove the duplicate,
#but only once we already retrieved at least one chunk
if len(messages) == 0:
messages = actions
else:
messages = actions[:-1] + messages
#update timestamp
timestamp = str(actions[0]["timestamp"])
else:
if 'errorSummary' in jsonData:
logger.error("Response error: " + jsonData['errorSummary'])
else:
logger.error("Response error. No messages found")
logger.error(msgsData)
return
else:
logger.error("Response error. Empty data or payload")
logger.error(msgsData)
logger.info("Retrying in {} seconds".format(self.ERROR_WAIT))
time.sleep(self.ERROR_WAIT)
continue
offset += chunkSize
if limit!= 0 and len(messages) >= limit:
break
time.sleep(self.REQUEST_WAIT)
if merge:
logger.info("Successfully merged {} new messages".format(numMergedMsgs))
logger.info("Conversation total message count = {}".format(len(messages)))
else:
logger.info("Conversation scraped successfully. {} messages retrieved".format(len(messages)))
self.writeMessages(messages)
def main(args=None):
parser = argparse.ArgumentParser(description='Conversation Scraper')
parser.add_argument('--id', metavar='conversationID', dest='convID', required=True)
parser.add_argument('--size', metavar='chunkSize', type=int, dest='chunkSize', default=2000,
help="number of messages to retrieve for each request")
#TODO not working, the timestamp seems the only relevant parameter
parser.add_argument('--off', metavar='offset', type=int, dest='offset', default=0,
help="messages number scraping offset")
#TODO to test, ??better single var
parser.add_argument('--date', metavar='offset', type=int, dest='timestampOffset', default=0,
help="messages timestamp scraping offset, has precedence over messages number offset")
parser.add_argument('--limit', type=int, dest='limit', default=0,
help="number of messages to be retrieved")
#Tells the program to try to merge the new messages with the previously scraped conversation
#avoid the need to scrape it all from the beginning
parser.add_argument('-m', dest='merge', action='store_true',
help="merge the new messages with previously scraped conversation")
parser.add_argument('-g', dest='isGroupConversation', action='store_true',
help="specify if you want to scrape a group conversation")
parser.set_defaults(merge=False)
baseFolderPath = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
parser.add_argument('--out', metavar='outputDir', dest='outDir',
default=os.path.join(baseFolderPath, 'Messages'))
parser.add_argument('--conf', metavar='configFilepath', dest='configFilepath',
default=os.path.join(baseFolderPath, 'config.ini'))
args = parser.parse_args()
convID = args.convID
chunkSize = args.chunkSize
timestampOffset = args.timestampOffset
offset = args.offset
limit = args.limit
merge = args.merge
isGroupConversation = args.isGroupConversation
outDir = args.outDir
configFilepath = args.configFilepath
DATA_SECTION = "User Data"
config = configparser.ConfigParser(interpolation=None)
config.read(configFilepath)
cookie = config.get(DATA_SECTION, "Cookie")
fb_dtsg = config.get(DATA_SECTION, "Fb_dtsg")
scraper = ConversationScraper(convID, cookie, fb_dtsg, outDir)
scraper.scrapeConversation(merge, offset, timestampOffset, chunkSize, limit, isGroupConversation)
if __name__ == "__main__":
main(sys.argv[1:]) | python |
import sys
import timeit
import zmq
from kombu import Connection
from timer import Timer
TOTAL_MESSAGES = int(sys.argv[1])
amqp_timer = Timer()
zmq_timer = Timer()
def log(msg):
pass
# print(msg)
def main():
context = zmq.Context()
sockets = {}
with Connection("amqp://guest:[email protected]:5672//") as conn:
simple_queue = conn.SimpleQueue("simple_queue")
# Block until we get the 'ready to start' message
print("Waiting for kick-off message from producer")
simple_queue.get(block=True).ack()
print("Got it! Let's go...")
def get():
nonlocal sockets
with amqp_timer:
message = simple_queue.get(block=True)
message_id = message.headers.get("id")
addr = message.headers.get("reply-to")
if not addr:
with amqp_timer:
message.ack()
log("Message with no reply-to header. Ignoring.")
return
if addr not in sockets:
log("Opening socket to: {}".format(addr))
with zmq_timer:
socket = context.socket(zmq.PUSH)
socket.connect(addr)
sockets[addr] = socket
socket = sockets[addr]
log("Sending response for {} to: {}".format(message_id, addr))
# Send the message ID back plus some data
with zmq_timer:
socket.send(bytes(message_id, "utf8") + b" x" * 1024)
log("Sent")
with amqp_timer:
message.ack()
seconds = timeit.timeit(get, number=TOTAL_MESSAGES)
print("Time per get: {}ms".format(round(seconds * 1000 / TOTAL_MESSAGES, 2)))
print("Gets per second: {}".format(round(TOTAL_MESSAGES / seconds, 2)))
print("ZeroMQ time: {}".format(zmq_timer))
print("AMQP time: {}".format(amqp_timer))
simple_queue.close()
if __name__ == "__main__":
main()
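# Example run (a sketch): assumes a RabbitMQ broker at the URL above and a matching
# producer that first publishes a kick-off message and then TOTAL_MESSAGES messages
# carrying 'id' and 'reply-to' headers; the script name is a placeholder:
#   python consumer.py 10000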
| python |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for GNN."""
import os
from models import GAT
from models import GCN
import numpy as np
import scipy.sparse as sp
import tensorflow as tf
def build_model(model_name, num_layers, hidden_dim, num_classes, dropout_rate,
num_heads, sparse):
"""Create gnn model and initialize parameters weights."""
# Convert hidden_dim to integers
for i in range(len(hidden_dim)):
hidden_dim[i] = int(hidden_dim[i])
# Only GCN and GAT are available.
if model_name == 'gcn':
model = GCN(
num_layers=num_layers,
hidden_dim=hidden_dim,
num_classes=num_classes,
dropout_rate=dropout_rate,
sparse=sparse,
bias=True)
elif model_name == 'gat':
model = GAT(
num_layers=num_layers,
hidden_dim=hidden_dim,
num_classes=num_classes,
dropout_rate=dropout_rate,
num_heads=num_heads,
sparse=sparse)
return model
def cal_acc(labels, logits):
indices = tf.math.argmax(logits, axis=1)
acc = tf.math.reduce_mean(tf.cast(indices == labels, dtype=tf.float32))
return acc.numpy().item()
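# Minimal sketch of cal_acc with dummy tensors (not part of the Cora pipeline):
#   labels = tf.constant([0, 1, 2], dtype=tf.int64)
#   logits = tf.constant([[5., 0., 0.], [0., 5., 0.], [0., 0., 5.]])
#   cal_acc(labels, logits)  # -> 1.0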
def encode_onehot(labels):
"""Provides a mapping from string labels to integer indices."""
label_index = {
'Case_Based': 0,
'Genetic_Algorithms': 1,
'Neural_Networks': 2,
'Probabilistic_Methods': 3,
'Reinforcement_Learning': 4,
'Rule_Learning': 5,
'Theory': 6,
}
# Convert to onehot label
num_classes = len(label_index)
onehot_labels = np.zeros((len(labels), num_classes))
for idx, s in enumerate(labels):
onehot_labels[idx, label_index[s]] = 1
return onehot_labels
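# Example: with the fixed mapping above, 'Neural_Networks' -> index 2 and 'Theory' -> index 6:
#   encode_onehot(['Neural_Networks', 'Theory'])
#   -> array([[0., 0., 1., 0., 0., 0., 0.],
#             [0., 0., 0., 0., 0., 0., 1.]])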
def normalize_adj_matrix(adj):
"""Normalize adjacency matrix."""
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def normalize_features(features):
"""Row-normalize feature matrix."""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return features
def sparse_matrix_to_tf_sparse_tensor(matrix):
"""Convert scipy sparse matrix to `tf.sparse.SparseTensor`."""
sp_matrix = matrix.tocoo().astype(np.float32)
indices = tf.convert_to_tensor(
np.vstack((sp_matrix.row, sp_matrix.col)).T.astype(np.int64))
values = tf.convert_to_tensor(sp_matrix.data)
shape = tf.TensorShape(sp_matrix.shape)
return tf.sparse.SparseTensor(indices, values, shape)
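# Minimal sketch: converting a 3x3 identity matrix to a tf.sparse.SparseTensor
#   st = sparse_matrix_to_tf_sparse_tensor(sp.eye(3))
#   st.indices.shape  # -> (3, 2); one (row, col) pair per nonzero entry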
def load_dataset(dataset, sparse_features, normalize_adj):
"""Loads Cora dataset."""
dir_path = os.path.join('data', dataset)
content_path = os.path.join(dir_path, '{}.content'.format(dataset))
citation_path = os.path.join(dir_path, '{}.cites'.format(dataset))
content = np.genfromtxt(content_path, dtype=np.dtype(str))
idx = np.array(content[:, 0], dtype=np.int32)
features = sp.csr_matrix(content[:, 1:-1], dtype=np.float32)
labels = encode_onehot(content[:, -1])
# Dict which maps paper id to data id
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt(citation_path, dtype=np.int32)
edges = np.array(
list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(labels.shape[0], labels.shape[0]),
dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
# Add self-connection edge
adj = adj + sp.eye(adj.shape[0])
features = normalize_features(features)
if normalize_adj:
adj = normalize_adj_matrix(adj)
# 140 nodes (~5% of Cora) for train, 300 for validation, 1000 for test
idx_train = slice(140)
idx_val = slice(200, 500)
idx_test = slice(500, 1500)
features = tf.convert_to_tensor(np.array(features.todense()))
labels = tf.convert_to_tensor(np.where(labels)[1])
if sparse_features:
adj = sparse_matrix_to_tf_sparse_tensor(adj)
else:
adj = tf.convert_to_tensor(np.array(adj.todense()))
return adj, features, labels, idx_train, idx_val, idx_test
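# Typical driver sketch (paths and hyperparameters are illustrative; assumes the Cora
# files live under data/cora/):
#   adj, features, labels, idx_train, idx_val, idx_test = load_dataset(
#       'cora', sparse_features=True, normalize_adj=True)
#   model = build_model('gcn', num_layers=2, hidden_dim=[16], num_classes=7,
#                       dropout_rate=0.5, num_heads=None, sparse=True)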
| python |
# import psycopg2
# import os
# import csv
# class PostgreSQL:
# def __init__(self, host, port, username, password, database):
# self.host = host
# self.port = port
# self.username = username
# self.password = password
# self.database = database
# self.connection = psycopg2.connect(user=self.username,
# password=self.password,
# host=self.host,
# port=self.port,
# database=self.database)
# self.cursor = self.connection.cursor()
# def close(self):
# if (self.connection):
# self.cursor.close()
# self.connection.close()
# def select(self, table_name, columns, key_names=[], key_values=[], key_ops=[], order=False, reverse=False, like=False, limit=None, offset=None):
# query = ' '.join(['SELECT', ('%s')%(', '.join(columns)), 'FROM', table_name])
# num_keys = len(key_names)
# assert num_keys == len(key_values)
# if (like):
# if(len(key_ops) == 0):
# key_ops = ['LIKE'] * num_keys
# if(num_keys > 0):
# condition = ' WHERE ' + ' AND '.join(['%s %s '%(key_name, key_op) + '%s' for key_name, key_op in zip(key_names, key_ops)])
# query += condition
# if(order):
# orderby = ' ORDER BY ' + str(order)
# query += orderby
# if(reverse):
# query += ' DESC '
# if(limit):
# query += ' LIMIT %d'%(limit)
# if(offset):
# query += ' OFFSET %d'%(offset)
# else:
# if(len(key_ops) == 0):
# key_ops = ['='] * num_keys
# if(num_keys > 0):
# condition = ' WHERE ' + ' AND '.join(['%s %s '%(key_name, key_op) + '%s' for key_name, key_op in zip(key_names, key_ops)])
# query += condition
# if(order):
# orderby = ' ORDER BY ' + str(order)
# query += orderby
# if(reverse):
# query += ' DESC '
# if(limit):
# query += ' LIMIT %d'%(limit)
# if(offset):
# query += ' OFFSET %d'%(offset)
# # print(query, key_values)
# self.cursor.execute(query, tuple(key_values))
# data = self.cursor.fetchall()
# return data
# def update(self, table_name, target_columns, target_values, key_columns, key_values):
# query = ' '.join(['UPDATE ', table_name, 'SET '])
# num_updates = len(target_columns)
# assert num_updates == len(target_values)
# updates = ', '.join(['%s = '%(column) + '%s' for column in target_columns])
# query += updates
# num_keys = len(key_columns)
# assert num_keys == len(key_values)
# if(num_keys > 0):
# condition = ' WHERE ' + ' AND '.join(['%s = '%(column) + '%s' for column in key_columns])
# query += condition
# # print(query)
# self.cursor.execute(query, tuple(target_values + key_values))
# self.connection.commit()
# def insert(self, table_name, columns, values):
# query = ' '.join(['INSERT INTO', table_name, ('(%s)')%(', '.join(columns)), 'VALUES', '(', ','.join(['%s']*len(values)) , ')'])
# values = tuple(values)
# # print('table', table_name)
# # print(values)
# self.cursor.execute(query, values)
# self.connection.commit()
# def delete(self, table_name, key_columns, key_values):
# query = ' '.join(['DELETE FROM ', table_name])
# condition = ' WHERE ' + ' AND '.join(['%s = '%(column) + '%s' for column in key_columns])
# query += condition
# self.cursor.execute(query, tuple(key_values))
# self.connection.commit()
# def list_loans(self):
# list_loans = self.select(table_name='loan_information_table', columns=['loan_name', 'amount_of_money'])
# return list_loans
# def detail_loan(self, key_names, key_values):
# detail = self.select(table_name='loan_information_table', columns=['loan_name', 'amount_of_money', 'methob', 'requirement', 'loan_term', 'interest_rate', 'disbursement_time'], key_names=key_names, key_values=key_values)
# return detail
| python |
from __future__ import print_function, absolute_import
import numpy as np
import torch
from torch.utils.data import Dataset
from functools import reduce
#####################################
# data loader with four outputs
#####################################
class PoseDataSet(Dataset):
def __init__(self, poses_3d, poses_2d, actions, cams):
assert poses_3d is not None
self._poses_3d = np.concatenate(poses_3d)
self._poses_2d = np.concatenate(poses_2d)
self._actions = reduce(lambda x, y: x + y, actions)
self._cams = np.concatenate(cams)
assert self._poses_3d.shape[0] == self._poses_2d.shape[0] and self._poses_3d.shape[0] == len(self._actions)
assert self._poses_3d.shape[0] == self._cams.shape[0]
print('Generating {} poses...'.format(len(self._actions)))
def __getitem__(self, index):
out_pose_3d = self._poses_3d[index]
out_pose_2d = self._poses_2d[index]
out_action = self._actions[index]
out_cam = self._cams[index]
out_pose_3d = torch.from_numpy(out_pose_3d).float()
out_pose_2d = torch.from_numpy(out_pose_2d).float()
return out_pose_3d, out_pose_2d, out_action, out_cam
def __len__(self):
return len(self._actions)
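# Usage sketch (shapes and batch size are illustrative assumptions): wrap the dataset in a
# DataLoader, e.g. for N sequences of 3D poses, 2D poses, action labels and camera parameters:
#   dataset = PoseDataSet(poses_3d, poses_2d, actions, cams)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)
#   for pose_3d, pose_2d, action, cam in loader:
#       ...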
#####################################
# data loader with two outputs
#####################################
class PoseBuffer(Dataset):
def __init__(self, poses_3d, poses_2d, score=None):
assert poses_3d is not None
self._poses_3d = np.concatenate(poses_3d)
self._poses_2d = np.concatenate(poses_2d)
assert self._poses_3d.shape[0] == self._poses_2d.shape[0]
print('Generating {} poses...'.format(self._poses_3d.shape[0]))
def __getitem__(self, index):
out_pose_3d = self._poses_3d[index]
out_pose_2d = self._poses_2d[index]
out_pose_3d = torch.from_numpy(out_pose_3d).float()
out_pose_2d = torch.from_numpy(out_pose_2d).float()
return out_pose_3d, out_pose_2d
def __len__(self):
return len(self._poses_2d)
#############################################################
# data loader for GAN
#############################################################
class PoseTarget(Dataset):
def __init__(self, poses):
assert poses is not None
self._poses = np.concatenate(poses)
print('Generating {} poses...'.format(self._poses.shape[0]))
def __getitem__(self, index):
out_pose = self._poses[index]
out_pose = torch.from_numpy(out_pose).float()
return out_pose
def __len__(self):
return len(self._poses)
class PoseTarget3D(Dataset):
def __init__(self, poses_3d):
assert poses_3d is not None
self._poses_3d = np.concatenate(poses_3d)
print('Generating {} poses...'.format(self._poses_3d.shape[0]))
def __getitem__(self, index):
out_pose_3d = self._poses_3d[index]
out_pose_3d = torch.from_numpy(out_pose_3d).float()
return out_pose_3d
def __len__(self):
return len(self._poses_3d)
class PoseTarget2D(Dataset):
def __init__(self, poses_2d):
assert poses_2d is not None
poses_2d = np.concatenate(poses_2d)
tmp_mask = np.ones((poses_2d.shape[0], poses_2d.shape[1], 1), dtype='float32')
self._poses_2d = np.concatenate((poses_2d, tmp_mask), axis=2)
print('Generating {} poses...'.format(self._poses_2d.shape[0]))
def __getitem__(self, index):
out_pose_2d = self._poses_2d[index]
out_pose_2d = torch.from_numpy(out_pose_2d).float()
return out_pose_2d[:, :-1], out_pose_2d[:, -1:]
def __len__(self):
return len(self._poses_2d)
| python |
import os
import pandas as pd
from base import BaseFeature
from google.cloud import storage, bigquery
from google.cloud import bigquery_storage_v1beta1
from encoding_func import target_encoding
class TargetEncodingResponseTimeDiff(BaseFeature):
def import_columns(self):
return [
"1"
]
def _read_features_from_bigquery(self, read_table_name: str) -> pd.DataFrame:
self._logger.info(f"Reading from {read_table_name}")
query = """
WITH
response_times AS (
SELECT
tweet_id,
engaging_user_id,
CASE WHEN n_engagement = 0 THEN NULL ELSE
TIMESTAMP_SECONDS(CAST( (
IF(like_engagement_timestamp IS NOT NULL, like_engagement_timestamp, 0) +
IF(reply_engagement_timestamp IS NOT NULL, reply_engagement_timestamp, 0) +
IF(retweet_engagement_timestamp IS NOT NULL, retweet_engagement_timestamp, 0) +
IF(retweet_with_comment_engagement_timestamp IS NOT NULL, retweet_with_comment_engagement_timestamp, 0)
) / n_engagement AS INT64)) END AS avg_engagement_timestamp
FROM (
SELECT
tweet_id,
engaging_user_id,
like_engagement_timestamp,
reply_engagement_timestamp,
retweet_engagement_timestamp,
retweet_with_comment_engagement_timestamp,
IF(like_engagement_timestamp IS NOT NULL, 1, 0) +
IF(reply_engagement_timestamp IS NOT NULL, 1, 0) +
IF(retweet_engagement_timestamp IS NOT NULL, 1, 0) +
IF(retweet_with_comment_engagement_timestamp IS NOT NULL, 1, 0) AS n_engagement
FROM
{}
)
)
SELECT
tweet_id,
engaging_user_id,
TIMESTAMP_DIFF(avg_engagement_timestamp, LAG(avg_engagement_timestamp) OVER(PARTITION BY engaging_user_id ORDER BY avg_engagement_timestamp), MINUTE) AS diff_time
FROM
response_times
ORDER BY
tweet_id,
engaging_user_id
""".format(read_table_name)
if self.debugging:
query += " limit 10000"
bqclient = bigquery.Client(project=self.PROJECT_ID)
bqstorageclient = bigquery_storage_v1beta1.BigQueryStorageClient()
df = (
bqclient.query(query)
.result()
.to_dataframe(bqstorage_client=bqstorageclient)
)
return df
def make_features(self, df_train_input, df_test_input):
df_train_input = self._read_features_from_bigquery(self.train_table)
df_test_input = self._read_features_from_bigquery(self.test_table)
df_train_features = pd.DataFrame()
df_test_features = pd.DataFrame()
folds_train = self._download_from_gs(
feather_file_name="TimeGroupKFold_training.ftr"
)
category_columns = [
"engaging_user_id",
]
target_columns = [
"diff_time",
]
for target_col in target_columns:
print(f'============= {target_col} =============')
# Get folds
folds_col = ["TimeGroupKFold_val_position"]
assert len(folds_col) == 1, "The number of fold column must be one"
folds = folds_train[folds_col]
n_fold = folds.max().values[0] + 1
folds_ids = []
for i in range(n_fold):
trn_idx = folds[folds != i].dropna().index
val_idx = folds[folds == i].dropna().index
folds_ids.append((trn_idx, val_idx))
print(f"{i+1}fold: n_trn={len(trn_idx)}, n_val={len(val_idx)}")
for cat_col in category_columns:
train_result, test_result = target_encoding(
cat_col, df_train_input, df_test_input, target_col, folds_ids)
df_train_input.drop(columns=[f"{cat_col}_ta"], inplace=True)
df_test_input.drop(columns=[f"{cat_col}_ta"], inplace=True)
df_train_features[f"{target_col}__{cat_col}"] = train_result
df_test_features[f"{target_col}__{cat_col}"] = test_result
print(df_train_features.isnull().sum())
print(df_test_features.isnull().sum())
return df_train_features, df_test_features
if __name__ == "__main__":
TargetEncodingResponseTimeDiff.main()
| python |
import numpy as np
from math import pi
'''
Class to calculate the inverse kinematics of a Stewart platform.
Takes a pose and a twist as input and calculates leg lengths and leg velocities.
All lengths are in meters [m].
'''
class InverseKinematics(object):
def __init__(self):
# minimum possible position in z (heave) direction
self.__z_min = 0.16107
# defining base plate position vectors
self.__a1 = np.array([
[-0.14228], [-0.0475], [0.]
])
self.__a2 = np.array([
[-0.11228], [-0.09947], [0.]
])
self.__a3 = np.array([
[0.11228], [-0.09947], [0.]
])
self.__a4 = np.array([
[0.14228], [-0.0475], [0.]
])
self.__a5 = np.array([
[0.030], [0.14697], [0.]
])
self.__a6 = np.array([
[-0.030], [0.14697], [0.]
])
# defining tool plate position vectors
self.__b1 = np.array([
[-0.09761], [0.02172], [0.]
])
self.__b2 = np.array([
[-0.030], [-0.09539], [0.]
])
self.__b3 = np.array([
[0.030], [-0.09539], [0.]
])
self.__b4 = np.array([
[0.09761], [0.02172], [0.]
])
self.__b5 = np.array([
[0.06761], [0.07368], [0.]
])
self.__b6 = np.array([
[-0.06761], [0.07368], [0.]
])
def __jacobian(self):
# unit-vectors of s (leg-vectors)
e_ = np.array([ self.__s1 / self.__l1,
self.__s2 / self.__l2,
self.__s3 / self.__l3,
self.__s4 / self.__l4,
self.__s5 / self.__l5,
self.__s6 / self.__l6, ])
# rotation matrix times tool-plate position vectors
rot_ = np.array([ self.__R @ self.__b1,
self.__R @ self.__b2,
self.__R @ self.__b3,
self.__R @ self.__b4,
self.__R @ self.__b5,
self.__R @ self.__b6 ])
rot = rot_.reshape(6, 1, 3)
e = e_.reshape(6, 1, 3)
# cross-product
cross1 = np.cross(rot[0], e[0])
cross2 = np.cross(rot[1], e[1])
cross3 = np.cross(rot[2], e[2])
cross4 = np.cross(rot[3], e[3])
cross5 = np.cross(rot[4], e[4])
cross6 = np.cross(rot[5], e[5])
# add together to a single array per row of the jacobian
# where the unit vector represents the translational part
# and the cross product represents the rotational part of the jacobian
J1 = np.hstack((e[0], cross1))
J2 = np.hstack((e[1], cross2))
J3 = np.hstack((e[2], cross3))
J4 = np.hstack((e[3], cross4))
J5 = np.hstack((e[4], cross5))
J6 = np.hstack((e[5], cross6))
# put all the rows above together in a single 6x6 matrix
J = np.concatenate((J1, J2, J3, J4, J5, J6), axis=0)
return J
def calc_output(self, pose, twist):
x = pose[0]
y = pose[1]
z = self.__z_min + pose[2]
# calculating sin and cos values for matrices
phi_sin = np.sin(pose[3]) # roll
phi_cos = np.cos(pose[3])
theta_sin = np.sin(pose[4]) # pitch
theta_cos = np.cos(pose[4])
psi_sin = np.sin(pose[5]) # yaw
psi_cos = np.cos(pose[5])
# defining the rotation matrices for each axis of rotation
r_x = np.array([
[1., 0., 0.],
[0., phi_cos, -phi_sin],
[0., phi_sin, phi_cos]
])
r_y = np.array([
[theta_cos, 0., theta_sin],
[0., 1., 0.],
[-theta_sin, 0., theta_cos]
])
r_z = np.array([
[psi_cos, -psi_sin, 0.],
[psi_sin, psi_cos, 0.],
[0., 0., 1.]
])
# defining total rotation matrix
self.__R = r_z @ r_y @ r_x
# defining position vector
p = np.array([
[x],
[y],
[z]
])
# calculating leg-vectors
self.__s1 = p + (self.__R @ self.__b1) - self.__a1
self.__s2 = p + (self.__R @ self.__b2) - self.__a2
self.__s3 = p + (self.__R @ self.__b3) - self.__a3
self.__s4 = p + (self.__R @ self.__b4) - self.__a4
self.__s5 = p + (self.__R @ self.__b5) - self.__a5
self.__s6 = p + (self.__R @ self.__b6) - self.__a6
# calculating leg lengths (leg-vector magnitudes)
self.__l1 = np.linalg.norm(self.__s1)
self.__l2 = np.linalg.norm(self.__s2)
self.__l3 = np.linalg.norm(self.__s3)
self.__l4 = np.linalg.norm(self.__s4)
self.__l5 = np.linalg.norm(self.__s5)
self.__l6 = np.linalg.norm(self.__s6)
# actuator stroke position
d1 = self.__l1 - 0.181
d2 = self.__l2 - 0.181
d3 = self.__l3 - 0.181
d4 = self.__l4 - 0.181
d5 = self.__l5 - 0.181
d6 = self.__l6 - 0.181
# inverse jacobian with respect to q (pose params, stored above as private self variables)
J = self.__jacobian()
# actuator stroke velocity
d_dot = np.hstack((J @ twist))
return np.array([[d1, d2, d3, d4, d5, d6], d_dot])
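# Minimal usage sketch (the pose and twist values below are illustrative only):
#   ik = InverseKinematics()
#   pose = np.array([0., 0., 0.02, 0., 0., 0.])   # x, y, heave, roll, pitch, yaw
#   twist = np.zeros(6)                           # linear + angular velocity
#   strokes, stroke_rates = ik.calc_output(pose, twist)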
| python |
import torch.nn as nn
from typing import Optional, Union, List
from .model_config import MODEL_CONFIG
from .decoder.deeplabv3plus import DeepLabV3PlusDecoder
from .get_encoder import build_encoder
from .base_model import SegmentationModel
from .lib import SynchronizedBatchNorm2d
BatchNorm2d = SynchronizedBatchNorm2d
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.shape[0], -1)
class DeepLabV3Plus(SegmentationModel):
"""DeepLabV3+ implementation from "Encoder-Decoder with Atrous Separable
Convolution for Semantic Image Segmentation"
Args:
encoder_name: Name of the classification model that will be used as an encoder (a.k.a backbone)
to extract features of different spatial resolution
encoder_depth: A number of stages used in encoder in range [3, 5]. Each stage generates features
two times smaller in spatial dimensions than previous one (e.g. for depth 0 we will have features
with shapes [(N, C, H, W),], for depth 1 - [(N, C, H, W), (N, C, H // 2, W // 2)] and so on).
Default is 5
encoder_weights: One of **None** (random initialization), **"imagenet"** (pre-training on ImageNet) and
other pretrained weights (see table with available weights for each encoder_name)
encoder_output_stride: Downsampling factor for last encoder features (see original paper for explanation)
decoder_atrous_rates: Dilation rates for ASPP module (should be a tuple of 3 integer values)
decoder_channels: A number of convolution filters in ASPP module. Default is 256
in_channels: A number of input channels for the model, default is 3 (RGB images)
classes: A number of classes for output mask (or you can think as a number of channels of output mask)
activation: An activation function to apply after the final convolution layer.
Available options are **"sigmoid"**, **"softmax"**, **"logsoftmax"**, **"tanh"**, **"identity"**, **callable** and **None**.
Default is **None**
upsampling: Final upsampling factor. Default is 4 to preserve input-output spatial shape identity
aux_classifier: If **True**, an auxiliary classification head (adaptive average pooling,
dropout, and a linear layer with ``classes - 1`` outputs) is built on top of the
encoder features. Default is **False**
Returns:
``torch.nn.Module``: **DeepLabV3Plus**
Reference:
https://arxiv.org/abs/1802.02611v3
"""
def __init__(
self,
in_channels: int = 3,
encoder_name: str = "resnet34",
encoder_weights: Optional[str] = None,
encoder_depth: int = 5,
encoder_channels: List[int] = [32,64,128,256,512],
encoder_output_stride: int = 16,
decoder_channels: int = 256,
decoder_atrous_rates: tuple = (12, 24, 36),
upsampling: int = 4,
classes: int = 1,
aux_classifier: bool = False,
):
super().__init__()
self.encoder_channels = encoder_channels
self.encoder = build_encoder(
encoder_name,
weights=encoder_weights,
n_channels=in_channels,
)
if encoder_output_stride == 8:
self.encoder.make_dilated(
stage_list=[3, 4],
dilation_list=[2, 4]
)
elif encoder_output_stride == 16:
self.encoder.make_dilated(
stage_list=[4],
dilation_list=[2]
)
elif encoder_output_stride == 32:
self.encoder.make_dilated(
stage_list=[4],
dilation_list=[2]
)
else:
raise ValueError(
"Encoder output stride should be 8 or 16, got {}".format(encoder_output_stride)
)
self.decoder = DeepLabV3PlusDecoder(
encoder_channels=self.encoder_channels,
out_channels=decoder_channels,
atrous_rates=decoder_atrous_rates,
output_stride=encoder_output_stride,
)
self.segmentation_head = nn.Sequential(
nn.UpsamplingBilinear2d(scale_factor=upsampling) if upsampling > 1 else nn.Identity(),
nn.Conv2d(decoder_channels, classes, kernel_size=3, padding=1)  # decoder_channels is an int, not a list
)
if aux_classifier:
self.classification_head = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
Flatten(),
nn.Dropout(p=0.2, inplace=True),
nn.Linear(self.encoder_channels[-1], classes - 1, bias=True)
)
else:
self.classification_head = None
self.name = "u-{}".format(encoder_name)
self.initialize()
def make_dilated(self, stage_list, dilation_list):
stages = self.get_stages()
for stage_indx, dilation_rate in zip(stage_list, dilation_list):
self.replace_strides_with_dilation(
module=stages[stage_indx],
dilation_rate=dilation_rate,
)
def replace_strides_with_dilation(self, module, dilation_rate):
"""Patch Conv2d modules replacing strides with dilation"""
for mod in module.modules():
if isinstance(mod, nn.Conv2d):
mod.stride = (1, 1)
mod.dilation = (dilation_rate, dilation_rate)
kh, kw = mod.kernel_size
mod.padding = ((kh // 2) * dilation_rate, (kw // 2) * dilation_rate)
# Workaround (hack) for EfficientNet's static padding
if hasattr(mod, "static_padding"):
mod.static_padding = nn.Identity() | python |
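# Minimal instantiation sketch (encoder name/weights are assumptions; the encoders and
# channel lists actually available depend on build_encoder and MODEL_CONFIG in this repo):
#   import torch
#   model = DeepLabV3Plus(in_channels=3, encoder_name="resnet34",
#                         encoder_output_stride=16, classes=2)
#   logits = model(torch.randn(1, 3, 256, 256))  # forward pass as defined by SegmentationModel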
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import argparse
def load():
"""
Description
Returns
Arguments object.
"""
parser = argparse.ArgumentParser(description="Program description",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("-t", "--template", # -t is the short argument alias --template is the long argument alias
help="Argument description/help",)
arguments = parser.parse_args()
return arguments
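# Usage sketch: calling load() from a script invoked as
#   python script.py --template my_template.txt   (script name is a placeholder)
# yields:
#   args = load()
#   args.template  # -> 'my_template.txt'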
| python |
from .model import Model
class Datacenters(Model):
pass
| python |
import threading
import typing as tp
from contextlib import contextmanager
from dataclasses import dataclass
from treex import types
@dataclass
class _Context(threading.local):
call_info: tp.Optional[tp.Dict["Module", tp.Tuple[types.Inputs, tp.Any]]] = None
def __enter__(self):
global _CONTEXT
self._old_context = _CONTEXT
_CONTEXT = self
def __exit__(self, *args):
global _CONTEXT
_CONTEXT = self._old_context
@contextmanager
def update(self, **kwargs):
fields = vars(self).copy()
fields.pop("_old_context", None)
fields.update(kwargs)
with _Context(**fields):
yield
_CONTEXT = _Context()
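# Usage sketch (the call_info value here is purely illustrative):
#   with _CONTEXT.update(call_info={}):
#       ...  # code here sees the temporarily swapped-in context
#   # on exit, the previous _CONTEXT is restored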
| python |
"""empty message
Revision ID: b1f6e283b530
Revises:
Create Date: 2021-06-15 12:34:40.497836
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b1f6e283b530'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_users_username', table_name='users')
op.drop_constraint('users_email_key', 'users', type_='unique')
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.drop_column('users', 'username')
op.drop_column('users', 'date_birth')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('date_birth', sa.DATE(), autoincrement=False, nullable=False))
op.add_column('users', sa.Column('username', sa.VARCHAR(length=64), autoincrement=False, nullable=True))
op.drop_index(op.f('ix_users_email'), table_name='users')
op.create_unique_constraint('users_email_key', 'users', ['email'])
op.create_index('ix_users_username', 'users', ['username'], unique=False)
# ### end Alembic commands ###
| python |
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.io.vasp.outputs import BSVasprun, Eigenval
from pymatgen.io.vasp.inputs import Kpoints, Poscar, Incar
from pymatgen.symmetry.bandstructure import HighSymmKpath
from vaspvis.unfold import unfold, make_kpath, removeDuplicateKpoints
from pymatgen.core.periodic_table import Element
from pyprocar.utilsprocar import UtilsProcar
from pyprocar.procarparser import ProcarParser
from functools import reduce
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge
from matplotlib.collections import PatchCollection
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import time
from copy import deepcopy
import os
import matplotlib as mpl
mpl.rcParams.update(mpl.rcParamsDefault)
class Band:
"""
This class contains all the methods for constructing band structures
from the outputs of VASP band structure calculations.
Parameters:
folder (str): This is the folder that contains the VASP files
projected (bool): Determines whether or not to parse the projected
eigenvalues from the PROCAR file. Making this true
increases the computational time, so only use if a projected
band structure is required.
spin (str): Choose which spin direction to parse. ('up' or 'down')
kpath (str): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for unfolded calculations this
information is a required input for proper labeling of the figure
for unfolded calculations. This information is extracted from the KPOINTS
files for non-unfolded calculations. (G is automatically converted to \\Gamma)
n (int): Number of points between each high symmetry point.
This is also only required for unfolded calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
"""
def __init__(self, folder, projected=False, unfold=False, spin='up', kpath=None, n=None, M=None, high_symm_points=None, bandgap=False, printbg=True, shift_efermi=0):
"""
Initialize parameters upon the generation of this class
Parameters:
folder (str): This is the folder that contains the VASP files
projected (bool): Determines whether or not to parse the projected
eigenvalues from the PROCAR file. Making this true
increases the computational time, so only use if a projected
band structure is required.
spin (str): Choose which spin direction to parse. ('up' or 'down')
kpath (str): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for unfolded calculations this
information is a required input for proper labeling of the figure
for unfolded calculations. This information is extracted from the KPOINTS
files for non-unfolded calculations. (G is automatically converted to \\Gamma)
n (int): Number of points between each high symmetry point.
This is also only required for unfolded calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
"""
self.bandgap = bandgap
self.printbg = printbg
self.bg = None
self.eigenval = Eigenval(os.path.join(folder, 'EIGENVAL'))
self.efermi = float(os.popen(f'grep E-fermi {os.path.join(folder, "OUTCAR")}').read().split()[2]) + shift_efermi
self.poscar = Poscar.from_file(
os.path.join(folder, 'POSCAR'),
check_for_POTCAR=False,
read_velocities=False
)
self.incar = Incar.from_file(
os.path.join(folder, 'INCAR')
)
if 'LSORBIT' in self.incar:
if self.incar['LSORBIT']:
self.lsorbit = True
else:
self.lsorbit = False
else:
self.lsorbit = False
if 'ISPIN' in self.incar:
if self.incar['ISPIN'] == 2:
self.ispin = True
else:
self.ispin = False
else:
self.ispin = False
if 'LHFCALC' in self.incar:
if self.incar['LHFCALC']:
self.hse = True
else:
self.hse = False
else:
self.hse = False
self.kpoints_file = Kpoints.from_file(os.path.join(folder, 'KPOINTS'))
self.wavecar = os.path.join(folder, 'WAVECAR')
self.projected = projected
self.forbitals = False
self.unfold = unfold
if self.hse and self.unfold:
self.hse = False
self.kpath = kpath
self.n = n
self.M = M
self.high_symm_points = high_symm_points
self.folder = folder
self.spin = spin
self.spin_dict = {'up': Spin.up, 'down': Spin.down}
if not self.unfold:
self.pre_loaded_bands = os.path.isfile(os.path.join(folder, 'eigenvalues.npy'))
self.eigenvalues, self.kpoints = self._load_bands()
else:
self.pre_loaded_bands = os.path.isfile(os.path.join(folder, 'unfolded_eigenvalues.npy'))
self.eigenvalues, self.spectral_weights, self.K_indices, self.kpoints = self._load_bands_unfold()
self.color_dict = {
0: '#FF0000',
1: '#0000FF',
2: '#008000',
3: '#800080',
4: '#E09200',
5: '#FF5C77',
6: '#778392',
7: '#07C589',
8: '#40BAF2',
9: '#FF0000',
10: '#0000FF',
11: '#008000',
12: '#800080',
13: '#E09200',
14: '#FF5C77',
15: '#778392',
}
self.orbital_labels = {
0: 's',
1: 'p_{y}',
2: 'p_{x}',
3: 'p_{z}',
4: 'd_{xy}',
5: 'd_{yz}',
6: 'd_{z^{2}}',
7: 'd_{xz}',
8: 'd_{x^{2}-y^{2}}',
9: 'f_{y^{3}x^{2}}',
10: 'f_{xyz}',
11: 'f_{yz^{2}}',
12: 'f_{z^{3}}',
13: 'f_{xz^{2}}',
14: 'f_{zx^{3}}',
15: 'f_{x^{3}}',
}
self.spd_relations = {
's': 0,
'p': 1,
'd': 2,
'f': 3,
}
if projected:
self.pre_loaded_projections = os.path.isfile(os.path.join(folder, 'projected_eigenvalues.npy'))
self.projected_eigenvalues = self._load_projected_bands()
def _get_bandgap(self, eigenvalues):
if np.sum(np.diff(np.sign(eigenvalues[:,:,0])) != 0) == 0:
occupied = eigenvalues[np.where(eigenvalues[:,:,-1] > 1e-8)]
unoccupied = eigenvalues[np.where(eigenvalues[:,:,-1] < 1e-8)]
vbm = np.max(occupied[:,0])
cbm = np.min(unoccupied[:,0])
bg = cbm - vbm
else:
bg = 0
if self.printbg:
print(f'Bandgap = {np.round(bg, 3)} eV')
self.bg = bg
def _load_bands(self):
"""
This function is used to load eigenvalues from the vasprun.xml
file and into a dictionary which is in the form of
band index --> eigenvalues
Returns:
bands_dict (dict[str][np.ndarray]): Dictionary which contains
the eigenvalues for each band
"""
if self.spin == 'up':
spin = 0
if self.spin == 'down':
spin = 1
if self.pre_loaded_bands:
with open(os.path.join(self.folder, 'eigenvalues.npy'), 'rb') as eigenvals:
band_data = np.load(eigenvals)
if self.ispin and not self.lsorbit:
eigenvalues = band_data[:,:,[0,2]]
kpoints = band_data[0,:,4:]
if self.bandgap:
eigenvalues_up = band_data[:,:,[0,1]]
eigenvalues_down = band_data[:,:,[2,3]]
eigenvalues_bg = np.vstack([eigenvalues_up, eigenvalues_down])
else:
eigenvalues = band_data[:,:,0]
kpoints = band_data[0,:,2:]
if self.bandgap:
eigenvalues_bg = band_data[:,:,[0,1]]
if self.bandgap:
self._get_bandgap(eigenvalues=eigenvalues_bg)
else:
if len(self.eigenval.eigenvalues.keys()) > 1:
eigenvalues_up = np.transpose(self.eigenval.eigenvalues[Spin.up], axes=(1,0,2))
eigenvalues_down = np.transpose(self.eigenval.eigenvalues[Spin.down], axes=(1,0,2))
eigenvalues_up[:,:,0] = eigenvalues_up[:,:,0] - self.efermi
eigenvalues_down[:,:,0] = eigenvalues_down[:,:,0] - self.efermi
eigenvalues = np.concatenate(
[eigenvalues_up, eigenvalues_down],
axis=2
)
if self.bandgap:
eigenvalues_bg = np.vstack([eigenvalues_up, eigenvalues_down])
else:
eigenvalues = np.transpose(self.eigenval.eigenvalues[Spin.up], axes=(1,0,2))
eigenvalues[:,:,0] = eigenvalues[:,:,0] - self.efermi
if self.bandgap:
eigenvalues_bg = eigenvalues
kpoints = np.array(self.eigenval.kpoints)
if self.hse:
kpoint_weights = np.array(self.eigenval.kpoints_weights)
zero_weight = np.where(kpoint_weights == 0)[0]
eigenvalues = eigenvalues[:,zero_weight]
if self.bandgap:
eigenvalues_bg = eigenvalues_bg[:, zero_weight]
kpoints = kpoints[zero_weight]
if self.bandgap:
self._get_bandgap(eigenvalues=eigenvalues_bg)
band_data = np.append(
eigenvalues,
np.tile(kpoints, (eigenvalues.shape[0],1,1)),
axis=2,
)
np.save(os.path.join(self.folder, 'eigenvalues.npy'), band_data)
if len(self.eigenval.eigenvalues.keys()) > 1:
eigenvalues = eigenvalues[:,:,[0,2]]
else:
eigenvalues = eigenvalues[:,:,0]
if len(self.eigenval.eigenvalues.keys()) > 1:
eigenvalues = eigenvalues[:,:,spin]
return eigenvalues, kpoints
def _load_bands_unfold(self):
if self.spin == 'up':
spin = 0
if self.spin == 'down':
spin = 1
kpath = make_kpath(self.high_symm_points, nseg=self.n)
if self.pre_loaded_bands:
with open(os.path.join(self.folder, 'unfolded_eigenvalues.npy'), 'rb') as eigenvals:
band_data = np.load(eigenvals)
else:
wavecar_data = unfold(
M=self.M,
wavecar=self.wavecar,
lsorbit=self.lsorbit,
)
band_data = wavecar_data.spectral_weight(kpath)
np.save(os.path.join(self.folder, 'unfolded_eigenvalues.npy'), band_data)
band_data = np.transpose(band_data[spin], axes=(2,1,0))
eigenvalues, spectral_weights, K_indices = band_data
eigenvalues = eigenvalues - self.efermi
kpath = np.array(kpath)
return eigenvalues, spectral_weights, K_indices, kpath
def _load_projected_bands(self):
"""
This function loads the project weights of the orbitals in each band
from vasprun.xml into a dictionary of the form:
band index --> atom index --> weights of orbitals
Returns:
projected_dict (dict([str][int][pd.DataFrame])): Dictionary containing the projected weights of all orbitals on each atom for each band.
"""
if self.lsorbit:
spin = 0
elif self.spin == 'up':
spin = 0
elif self.spin == 'down':
spin = 1
if not os.path.isfile(os.path.join(self.folder, 'PROCAR_repaired')):
UtilsProcar().ProcarRepair(
os.path.join(self.folder, 'PROCAR'),
os.path.join(self.folder, 'PROCAR_repaired'),
)
if self.pre_loaded_projections:
with open(os.path.join(self.folder, 'projected_eigenvalues.npy'), 'rb') as projected_eigenvals:
projected_eigenvalues = np.load(projected_eigenvals)
else:
parser = ProcarParser()
parser.readFile(os.path.join(self.folder, 'PROCAR_repaired'))
if self.ispin and not self.lsorbit and np.sum(self.poscar.natoms) == 1:
shape = int(parser.spd.shape[1] / 2)
projected_eigenvalues_up = np.transpose(parser.spd[:,:shape,0,:,1:-1], axes=(1,0,2,3))
projected_eigenvalues_down = np.transpose(parser.spd[:,shape:,0,:,1:-1], axes=(1,0,2,3))
projected_eigenvalues = np.concatenate(
[projected_eigenvalues_up[:,:,:,:,np.newaxis], projected_eigenvalues_down[:,:,:,:,np.newaxis]],
axis=4
)
projected_eigenvalues = np.transpose(projected_eigenvalues, axes=(0,1,4,2,3))
elif self.ispin and not self.lsorbit and np.sum(self.poscar.natoms) != 1:
shape = int(parser.spd.shape[1] / 2)
projected_eigenvalues_up = np.transpose(parser.spd[:,:shape,0,:-1,1:-1], axes=(1,0,2,3))
projected_eigenvalues_down = np.transpose(parser.spd[:,shape:,0,:-1,1:-1], axes=(1,0,2,3))
projected_eigenvalues = np.concatenate(
[projected_eigenvalues_up[:,:,:,:,np.newaxis], projected_eigenvalues_down[:,:,:,:,np.newaxis]],
axis=4
)
projected_eigenvalues = np.transpose(projected_eigenvalues, axes=(0,1,4,2,3))
else:
if np.sum(self.poscar.natoms) == 1:
projected_eigenvalues = np.transpose(parser.spd[:,:,:,:, 1:-1], axes=(1,0,2,3,4))
else:
projected_eigenvalues = np.transpose(parser.spd[:,:,:,:-1, 1:-1], axes=(1,0,2,3,4))
np.save(os.path.join(self.folder, 'projected_eigenvalues.npy'), projected_eigenvalues)
projected_eigenvalues = projected_eigenvalues[:,:,spin,:,:]
if self.hse:
kpoint_weights = np.array(self.eigenval.kpoints_weights)
zero_weight = np.where(kpoint_weights == 0)[0]
projected_eigenvalues = projected_eigenvalues[:,zero_weight]
if projected_eigenvalues.shape[-1] == 16:
self.forbitals = True
projected_eigenvalues = np.square(projected_eigenvalues)
return projected_eigenvalues
def _sum_spd(self, spd):
"""
This function sums the weights of the s, p, and d (and f) orbitals over all atoms.
Returns:
spd_contributions (np.ndarray): Summed s, p, and d orbital weights for each band and k-point
"""
if not self.forbitals:
spd_indices = [np.array([False for _ in range(9)]) for i in range(3)]
spd_indices[0][0] = True
spd_indices[1][1:4] = True
spd_indices[2][4:] = True
else:
spd_indices = [np.array([False for _ in range(16)]) for i in range(4)]
spd_indices[0][0] = True
spd_indices[1][1:4] = True
spd_indices[2][4:9] = True
spd_indices[3][9:] = True
orbital_contributions = np.sum(self.projected_eigenvalues, axis=2)
spd_contributions = np.transpose(
np.array([
np.sum(orbital_contributions[:,:,ind], axis=2) for ind in spd_indices
]), axes=[1,2,0]
)
# norm_term = np.sum(spd_contributions, axis=2)[:,:,np.newaxis]
# spd_contributions = np.divide(spd_contributions, norm_term, out=np.zeros_like(spd_contributions), where=norm_term!=0)
spd_contributions = spd_contributions[:,:,[self.spd_relations[orb] for orb in spd]]
return spd_contributions
def _sum_orbitals(self, orbitals):
"""
This function sums the weights of the desired orbitals over all atoms.
Parameters:
orbitals (list): List of desired orbitals.
0 = s
1 = py
2 = pz
3 = px
4 = dxy
5 = dyz
6 = dz2
7 = dxz
8 = dx2-y2
9 = fy3x2
10 = fxyz
11 = fyz2
12 = fz3
13 = fxz2
14 = fzx3
15 = fx3
Returns:
orbital_contributions (np.ndarray): Projected weights of the selected orbitals for each band and k-point.
"""
orbital_contributions = self.projected_eigenvalues.sum(axis=2)
# norm_term = np.sum(orbital_contributions, axis=2)[:,:,np.newaxis]
# orbital_contributions = np.divide(orbital_contributions, norm_term, out=np.zeros_like(orbital_contributions), where=norm_term!=0)
orbital_contributions = orbital_contributions[:,:,[orbitals]]
return orbital_contributions
def _sum_atoms(self, atoms, spd=False):
"""
This function sums the weights of the desired atoms over all orbitals
(or over the s/p/d groups when spd=True).
Parameters:
atoms (list): List of desired atoms where atom 0 is the first atom in
the POSCAR file.
Returns:
atoms_array (np.ndarray): Projected weights of the selected atoms (or, when
spd=True, the s/p/d-resolved weights of all atoms) for each band and k-point.
"""
if spd:
if not self.forbitals:
spd_indices = [np.array([False for _ in range(9)]) for i in range(3)]
spd_indices[0][0] = True
spd_indices[1][1:4] = True
spd_indices[2][4:] = True
else:
spd_indices = [np.array([False for _ in range(16)]) for i in range(4)]
spd_indices[0][0] = True
spd_indices[1][1:4] = True
spd_indices[2][4:9] = True
spd_indices[3][9:] = True
atoms_spd = np.transpose(np.array([
np.sum(self.projected_eigenvalues[:,:,:,ind], axis=3) for ind in spd_indices
]), axes=(1,2,3,0))
# atoms_spd = atoms_spd[:,:,[atoms], :]
# norm_term = np.sum(atoms_spd_to_norm, axis=(2,3))[:,:, np.newaxis]
# atoms_spd = np.divide(atoms_spd, norm_term, out=np.zeros_like(atoms_spd), where=norm_term!=0)
return atoms_spd
else:
atoms_array = self.projected_eigenvalues.sum(axis=3)
# norm_term = np.sum(atoms_array, axis=2)[:,:,np.newaxis]
# atoms_array = np.divide(atoms_array, norm_term, out=np.zeros_like(atoms_array), where=norm_term!=0)
atoms_array = atoms_array[:,:,[atoms]]
return atoms_array
def _sum_elements(self, elements, orbitals=False, spd=False, spd_options=None):
"""
This function sums the weights of the orbitals of specific elements within the
calculated structure and returns an array of the summed weights
(element- and orbital-resolved when orbitals=True, element-resolved otherwise).
This is useful for structures with many elements because manually entering atom indices is
not practical for large structures.
Parameters:
elements (list): List of element symbols to sum the weights of.
orbitals (bool): Determines whether or not to keep the orbital-resolved weights
(True = keep orbitals, False = sum orbitals together)
spd (bool): Determines whether or not to sum the s, p, and d orbitals
Returns:
element_array (np.ndarray): Summed weights for each selected element (and, if requested, each orbital) for every band and k-point.
"""
poscar = self.poscar
natoms = poscar.natoms
symbols = poscar.site_symbols
projected_eigenvalues = self.projected_eigenvalues
element_list = np.hstack(
[[symbols[i] for j in range(natoms[i])] for i in range(len(symbols))]
)
element_indices = [np.where(np.isin(element_list, element))[0] for element in elements]
element_orbitals = np.transpose(
np.array([
np.sum(projected_eigenvalues[:,:,ind,:], axis=2) for ind in element_indices
]), axes=(1,2,0,3)
)
if orbitals:
return element_orbitals
elif spd:
if not self.forbitals:
spd_indices = [np.array([False for _ in range(9)]) for i in range(3)]
spd_indices[0][0] = True
spd_indices[1][1:4] = True
spd_indices[2][4:] = True
else:
spd_indices = [np.array([False for _ in range(16)]) for i in range(4)]
spd_indices[0][0] = True
spd_indices[1][1:4] = True
spd_indices[2][4:9] = True
spd_indices[3][9:] = True
element_spd = np.transpose(np.array([
np.sum(element_orbitals[:,:,:,ind], axis=3) for ind in spd_indices
]), axes=(1,2,3,0))
# norm_term = np.sum(element_spd, axis=(2,3))[:,:,np.newaxis, np.newaxis]
# element_spd = np.divide(element_spd, norm_term, out=np.zeros_like(element_spd), where=norm_term!=0)
return element_spd
else:
element_array = np.sum(element_orbitals, axis=3)
# norm_term = np.sum(element_array, axis=2)[:,:,np.newaxis]
# element_array = np.divide(element_array, norm_term, out=np.zeros_like(element_array), where=norm_term!=0)
return element_array
def _get_k_distance(self):
cell = self.poscar.structure.lattice.matrix
kpt_c = np.dot(self.kpoints, np.linalg.inv(cell).T)
kdist = np.r_[0, np.cumsum(np.linalg.norm( np.diff(kpt_c,axis=0), axis=1))]
return kdist
def _get_kticks(self, ax):
"""
This function extracts the kpoint labels and index locations for a regular
band structure calculation (non unfolded).
Parameters:
ax (matplotlib.pyplot.axis): Axis to append the tick labels
"""
high_sym_points = self.kpoints_file.kpts
kpts_labels = np.array([f'${k}$' if k != 'G' else '$\\Gamma$' for k in self.kpoints_file.labels])
all_kpoints = self.kpoints
index = [0]
for i in range(len(high_sym_points) - 2):
if high_sym_points[i + 2] != high_sym_points[i + 1]:
index.append(i)
index.append(len(high_sym_points) - 1)
kpts_loc = np.isin(np.round(all_kpoints, 3), np.round(high_sym_points, 3)).all(1)
kpoints_index = np.where(kpts_loc == True)[0]
kpts_labels = kpts_labels[index]
kpoints_index = list(kpoints_index[index])
kpoints_index = ax.lines[0].get_xdata()[kpoints_index]
for k in kpoints_index:
ax.axvline(x=k, color='black', alpha=0.7, linewidth=0.5)
ax.set_xticks(kpoints_index)
ax.set_xticklabels(kpts_labels)
def _get_kticks_hse(self, ax, kpath):
structure = self.poscar.structure
kpath_obj = HighSymmKpath(structure)
kpath_labels = np.array(list(kpath_obj._kpath['kpoints'].keys()))
kpath_coords = np.array(list(kpath_obj._kpath['kpoints'].values()))
index = np.where((self.kpoints[:, None] == kpath_coords).all(-1).any(-1) == True)[0]
index = [index[0]] + [index[i] for i in range(1,len(index)-1) if i % 2] + [index[-1]]
kpoints_in_band = self.kpoints[index]
label_index = []
for i in range(kpoints_in_band.shape[0]):
for j in range(kpath_coords.shape[0]):
if (kpoints_in_band[i] == kpath_coords[j]).all():
label_index.append(j)
kpoints_index = index
kpath = kpath_labels[label_index]
kpoints_index = ax.lines[0].get_xdata()[kpoints_index]
kpath = [f'${k}$' if k != 'G' else '$\\Gamma$' for k in kpath]
for k in kpoints_index:
ax.axvline(x=k, color='black', alpha=0.7, linewidth=0.5)
plt.xticks(kpoints_index, kpath)
def _get_kticks_unfold(self, ax, wave_vectors):
kpath = [
f'${k}$' if k != 'G' else '$\\Gamma$' for k in self.kpath.upper().strip()
]
kpoints_index = [0] + [(self.n * i) for i in range(1, len(self.kpath))]
for k in kpoints_index:
ax.axvline(x=wave_vectors[k], color='black', alpha=0.7, linewidth=0.5)
ax.set_xticks(wave_vectors[kpoints_index])
ax.set_xticklabels(kpath)
# plt.xticks(np.array(kpoints)[kpoints_index], kpath)
def _filter_bands(self, erange):
eigenvalues = self.eigenvalues
where = (eigenvalues >= np.min(erange)) & (eigenvalues <= np.max(erange))
is_true = np.sum(np.isin(where, True), axis=1)
bands_in_plot = is_true > 0
return bands_in_plot
def _add_legend(self, ax, names, colors, fontsize=5, markersize=2):
legend_lines = []
legend_labels = []
for name, color in zip(names, colors):
legend_lines.append(plt.Line2D(
[0],
[0],
marker='o',
markersize=markersize,
linestyle='',
color=color
))
legend_labels.append(
f'${name}$'
)
leg = ax.get_legend()
if leg is None:
handles = legend_lines
labels = legend_labels
else:
handles = [l._legmarker for l in leg.legendHandles]
labels = [text._text for text in leg.texts]
handles.extend(legend_lines)
labels.extend(legend_labels)
ax.legend(
handles,
labels,
ncol=1,
loc='upper left',
fontsize=fontsize,
bbox_to_anchor=(1, 1),
borderaxespad=0,
frameon=False,
handletextpad=0.1,
)
def plot_plain(self, ax, color='black', erange=[-6,6], linewidth=1.25, scale_factor=20, linestyle='-'):
"""
This function plots a plain band structure.
Parameters:
ax (matplotlib.pyplot.axis): Axis to plot the data on
color (str): Color of the band structure lines
linewidth (float): Line width of the band structure lines
linestyle (str): Line style of the bands
"""
bands_in_plot = self._filter_bands(erange=erange)
eigenvalues = self.eigenvalues[bands_in_plot]
wave_vectors = self._get_k_distance()
# if self.unfold:
# wave_vectors = (wave_vectors / np.max(wave_vectors)) * 5
eigenvalues_ravel = np.ravel(np.c_[eigenvalues, np.empty(eigenvalues.shape[0]) * np.nan])
wave_vectors_tile = np.tile(np.append(wave_vectors, np.nan), eigenvalues.shape[0])
if self.unfold:
spectral_weights = self.spectral_weights[bands_in_plot]
spectral_weights = spectral_weights / np.max(spectral_weights)
spectral_weights_ravel = np.ravel(np.c_[spectral_weights, np.empty(spectral_weights.shape[0]) * np.nan])
ax.scatter(
wave_vectors_tile,
eigenvalues_ravel,
c=color,
ec=None,
s=scale_factor * spectral_weights_ravel,
zorder=0,
)
else:
ax.plot(
wave_vectors_tile,
eigenvalues_ravel,
color=color,
linewidth=linewidth,
linestyle=linestyle,
zorder=0,
)
if self.hse:
self._get_kticks_hse(ax=ax, kpath=self.kpath)
elif self.unfold:
self._get_kticks_unfold(ax=ax, wave_vectors=wave_vectors)
else:
self._get_kticks(ax=ax)
ax.set_xlim(0, np.max(wave_vectors))
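# Usage sketch (the folder path is an assumption; it must contain EIGENVAL, OUTCAR,
# POSCAR, INCAR and KPOINTS from a band-structure run):
#   band = Band(folder='band_calculation', projected=False)
#   fig, ax = plt.subplots()
#   band.plot_plain(ax, erange=[-4, 4])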
def _plot_projected_general(self, ax, projected_data, colors, scale_factor=5, erange=[-6,6], display_order=None, linewidth=0.75, band_color='black'):
"""
This is a general method for plotting projected data
Parameters:
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
colors (list or np.ndarray): Colors for each projected channel, in the same
order as the channels in projected_data
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
"""
if self.unfold:
band_color = [(0.9,0.9,0.9)]
scale_factor = scale_factor * 4
self.plot_plain(ax=ax, linewidth=linewidth, color=band_color, erange=erange)
bands_in_plot = self._filter_bands(erange=erange)
projected_data = projected_data[bands_in_plot]
projected_data = projected_data / np.max(projected_data)
wave_vectors = self._get_k_distance()
eigenvalues = self.eigenvalues[bands_in_plot]
if self.unfold:
spectral_weights = self.spectral_weights[bands_in_plot]
spectral_weights = spectral_weights / np.max(spectral_weights)
K_indices = np.array(self.K_indices[0], dtype=int)
projected_data = projected_data[:, K_indices, :]
spectral_weights_ravel = np.repeat(np.ravel(spectral_weights), projected_data.shape[-1])
projected_data_ravel = np.ravel(projected_data)
wave_vectors_tile = np.tile(
np.repeat(wave_vectors, projected_data.shape[-1]), projected_data.shape[0]
)
eigenvalues_tile = np.repeat(np.ravel(eigenvalues), projected_data.shape[-1])
colors_tile = np.tile(colors, np.prod(projected_data.shape[:-1]))
if display_order is None:
pass
else:
sort_index = np.argsort(projected_data_ravel)
if display_order == 'all':
sort_index = sort_index[::-1]
wave_vectors_tile = wave_vectors_tile[sort_index]
eigenvalues_tile = eigenvalues_tile[sort_index]
colors_tile = colors_tile[sort_index]
projected_data_ravel = projected_data_ravel[sort_index]
if self.unfold:
spectral_weights_ravel = spectral_weights_ravel[sort_index]
if self.unfold:
s = scale_factor * projected_data_ravel * spectral_weights_ravel
ec = None
else:
s = scale_factor * projected_data_ravel
ec = colors_tile
ax.scatter(
wave_vectors_tile,
eigenvalues_tile,
c=colors_tile,
ec=ec,
s=s,
zorder=100,
)
def plot_orbitals(self, ax, orbitals, scale_factor=5, erange=[-6,6], display_order=None, color_list=None, legend=True, linewidth=0.75, band_color='black'):
"""
This function plots the projected band structure of given orbitals summed across all atoms on a given axis.
Parameters:
ax (matplotlib.pyplot.axis): Axis to plot the data on
orbitals (list): List of orbits to compare
| 0 = s
| 1 = py
| 2 = pz
| 3 = px
| 4 = dxy
| 5 = dyz
| 6 = dz2
| 7 = dxz
| 8 = dx2-y2
| 9 = fy3x2
| 10 = fxyz
| 11 = fyz2
| 12 = fz3
| 13 = fxz2
| 14 = fzx3
| 15 = fx3
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
color_list (list): List of colors, one per selected orbital. If None, the
default orbital colors are used.
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
"""
if color_list is None:
colors = np.array([self.color_dict[i] for i in orbitals])
else:
colors = color_list
projected_data = self._sum_orbitals(orbitals=orbitals)
self._plot_projected_general(
ax=ax,
projected_data=projected_data,
colors=colors,
scale_factor=scale_factor,
erange=erange,
display_order=display_order,
linewidth=linewidth,
band_color=band_color
)
if legend:
self._add_legend(ax, names=[self.orbital_labels[i] for i in orbitals], colors=colors)
def plot_spd(self, ax, scale_factor=5, orbitals='spd', erange=[-6,6], display_order=None, color_dict=None, legend=True, linewidth=0.75, band_color='black'):
"""
This function plots the s, p, d projected band structure onto a given axis
Parameters:
ax (matplotlib.pyplot.axis): Axis to plot the data on
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
display_order (None or str): This determines the order in which the points are plotted on the
graph. This is an option because sometimes certain orbitals can be hidden
under others because they have a larger weight. For example, if the
weights of the d orbitals are greater than that of the s orbitals, it
might be smart to choose ['d', 'p', 's'] as the order so the s orbitals are
plotted over the d orbitals.
color_dict (dict[str][str]): This option allows the colors of the s, p, and d
orbitals to be specified. Should be in the form of:
{'s': <s color>, 'p': <p color>, 'd': <d color>}
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
"""
if color_dict is None:
color_dict = {
0: self.color_dict[0],
1: self.color_dict[1],
2: self.color_dict[2],
3: self.color_dict[4],
}
colors = np.array([color_dict[self.spd_relations[i]] for i in orbitals])
projected_data = self._sum_spd(spd=orbitals)
self._plot_projected_general(
ax=ax,
projected_data=projected_data,
colors=colors,
scale_factor=scale_factor,
erange=erange,
display_order=display_order,
linewidth=linewidth,
band_color=band_color
)
if legend:
self._add_legend(ax, names=[i for i in orbitals], colors=colors)
def plot_atoms(self, ax, atoms, scale_factor=5, erange=[-6,6], display_order=None, color_list=None, legend=True, linewidth=0.75, band_color='black'):
"""
This function plots the projected band structure of given atoms summed across all orbitals on a given axis.
Parameters:
ax (matplotlib.pyplot.axis): Axis to plot the data on
atoms (list): List of atoms to project onto
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
color_list (list): List of colors of the same length as the atoms list
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
"""
if color_list is None:
colors = np.array([self.color_dict[i] for i in range(len(atoms))])
else:
colors = color_list
projected_data = self._sum_atoms(atoms=atoms)
self._plot_projected_general(
ax=ax,
projected_data=projected_data,
colors=colors,
scale_factor=scale_factor,
erange=erange,
display_order=display_order,
linewidth=linewidth,
band_color=band_color
)
if legend:
self._add_legend(ax, names=atoms, colors=colors)
def plot_atom_orbitals(self, ax, atom_orbital_dict, scale_factor=5, erange=[-6,6], display_order=None, color_list=None, legend=True, linewidth=0.75, band_color='black'):
"""
This function plots the projected band structure of individual orbitals on a given axis.
Parameters:
ax (matplotlib.pyplot.axis): Axis to plot the data on
atom_orbital_dict (dict): Dictionary mapping atom indices (0-based, in POSCAR order)
to lists of orbital indices to plot.
To plot the px orbital of the 1st atom and the pz orbital of the 2nd atom
in the POSCAR file, the input would be {0: [3], 1: [2]}
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
color_list (list): List of colors, one per atom/orbital pair
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
"""
atom_indices = list(atom_orbital_dict.keys())
orbital_indices = list(atom_orbital_dict.values())
number_orbitals = [len(i) for i in orbital_indices]
atom_indices = np.repeat(atom_indices, number_orbitals)
orbital_symbols_long = np.hstack([
[self.orbital_labels[o] for o in orb] for orb in orbital_indices
])
orbital_indices_long = np.hstack(orbital_indices)
indices = np.vstack([atom_indices, orbital_indices_long]).T
projected_data = self.projected_eigenvalues
projected_data = np.transpose(np.array([
projected_data[:,:,ind[0],ind[1]] for ind in indices
]), axes=(1,2,0))
if color_list is None:
colors = np.array([self.color_dict[i] for i in range(len(orbital_indices_long))])
else:
colors = color_list
self._plot_projected_general(
ax=ax,
projected_data=projected_data,
colors=colors,
scale_factor=scale_factor,
erange=erange,
display_order=display_order,
linewidth=linewidth,
band_color=band_color
)
if legend:
self._add_legend(
ax,
names=[f'{i[0]}({i[1]})' for i in zip(atom_indices, orbital_symbols_long)],
colors=colors
)
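    # Usage sketch: project the px orbital of atom 0 and the pz orbital of atom 1
    # (orbital indices as documented above); 'band' and 'ax' are assumed to exist:
    #     band.plot_atom_orbitals(ax=ax, atom_orbital_dict={0: [3], 1: [2]}, scale_factor=20)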
def plot_atom_spd(self, ax, atom_spd_dict, scale_factor=5, erange=[-6,6], display_order=None, color_list=None, legend=True, linewidth=0.75, band_color='black'):
"""
This function plots the projected band structure on the s, p, and d orbitals for each specified atom in the calculated structure.
Parameters:
ax (matplotlib.pyplot.axis): Axis to plot the data on
atom_spd_dict (dict): Dictionary to determine the atom and spd orbitals to project onto
                Format: {0: 'spd', 1: 'sp', 2: 's'} where 0, 1, 2 are atom indices in the POSCAR
            display_order (None or str): The available options are None, 'all', and 'dominant', where None
                plots the scatter points in the order given in the atom_spd_dict, 'all' plots the
                scatter points from largest to smallest so all points are visible, and 'dominant' plots
                the scatter points from smallest to largest so only the dominant color is visible.
            scale_factor (float): Factor to scale weights. This changes the size of the
                points in the scatter plot
            color_list (list): List of colors of the same length as the total number of selected orbitals
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
"""
atom_indices = list(atom_spd_dict.keys())
orbital_symbols = list(atom_spd_dict.values())
number_orbitals = [len(i) for i in orbital_symbols]
atom_indices = np.repeat(atom_indices, number_orbitals)
orbital_symbols_long = np.hstack([[o for o in orb] for orb in orbital_symbols])
orbital_indices = np.hstack([[self.spd_relations[o] for o in orb] for orb in orbital_symbols])
indices = np.vstack([atom_indices, orbital_indices]).T
projected_data = self._sum_atoms(atoms=atom_indices, spd=True)
projected_data = np.transpose(np.array([
projected_data[:,:,ind[0],ind[1]] for ind in indices
]), axes=(1,2,0))
if color_list is None:
colors = np.array([self.color_dict[i] for i in range(len(orbital_symbols_long))])
else:
colors = color_list
self._plot_projected_general(
ax=ax,
projected_data=projected_data,
colors=colors,
scale_factor=scale_factor,
erange=erange,
display_order=display_order,
linewidth=linewidth,
band_color=band_color
)
if legend:
self._add_legend(
ax,
names=[f'{i[0]}({i[1]})' for i in zip(atom_indices, orbital_symbols_long)],
colors=colors
)
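    # Usage sketch using the atom_spd_dict format from the docstring (assumes the POSCAR
    # contains at least three atoms):
    #     band.plot_atom_spd(ax=ax, atom_spd_dict={0: 'spd', 1: 'sp', 2: 's'}, scale_factor=20)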
def plot_elements(self, ax, elements, scale_factor=5, erange=[-6,6], display_order=None, color_list=None, legend=True, linewidth=0.75, band_color='black'):
"""
This function plots the projected band structure on specified elements in the calculated structure
Parameters:
ax (matplotlib.pyplot.axis): Axis to plot the data on
elements (list): List of element symbols to project onto
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
color_list (list): List of colors of the same length as the elements list
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
"""
if color_list is None:
colors = np.array([self.color_dict[i] for i in range(len(elements))])
else:
colors = color_list
projected_data = self._sum_elements(elements=elements)
self._plot_projected_general(
ax=ax,
projected_data=projected_data,
colors=colors,
scale_factor=scale_factor,
erange=erange,
display_order=display_order,
linewidth=linewidth,
band_color=band_color
)
if legend:
self._add_legend(ax, names=elements, colors=colors)
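    # Usage sketch (the element symbols are placeholders for whatever appears in the POSCAR):
    #     band.plot_elements(ax=ax, elements=['In', 'As'], scale_factor=20, erange=[-5, 0])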
def plot_element_orbitals(self, ax, element_orbital_dict, scale_factor=5, erange=[-6,6], display_order=None, color_list=None, legend=True, linewidth=0.75, band_color='black'):
"""
        This function plots the projected band structure on chosen orbitals for each specified element in the calculated structure.
        Parameters:
            ax (matplotlib.pyplot.axis): Axis to plot the data on
            element_orbital_dict (dict[str][list]): Dictionary that maps element symbols to the
                orbital indices to project onto. Format: {element symbol: [orbital indices], ...}
            scale_factor (float): Factor to scale weights. This changes the size of the
                points in the scatter plot
            color_list (list): List of colors of the same length as the total number of selected orbitals
            legend (bool): Determines if the legend should be included or not.
            linewidth (float): Line width of the plain band structure plotted in the background
            band_color (string): Color of the plain band structure
"""
element_symbols = list(element_orbital_dict.keys())
orbital_indices = list(element_orbital_dict.values())
number_orbitals = [len(i) for i in orbital_indices]
element_symbols_long = np.repeat(element_symbols, number_orbitals)
element_indices = np.repeat(range(len(element_symbols)), number_orbitals)
orbital_symbols_long = np.hstack([[self.orbital_labels[o] for o in orb] for orb in orbital_indices])
orbital_indices_long = np.hstack(orbital_indices)
indices = np.vstack([element_indices, orbital_indices_long]).T
projected_data = self._sum_elements(elements=element_symbols, orbitals=True)
projected_data = np.transpose(np.array([
projected_data[:,:,ind[0],ind[1]] for ind in indices
]), axes=(1,2,0))
if color_list is None:
colors = np.array([self.color_dict[i] for i in range(len(orbital_indices_long))])
else:
colors = color_list
self._plot_projected_general(
ax=ax,
projected_data=projected_data,
colors=colors,
scale_factor=scale_factor,
erange=erange,
display_order=display_order,
linewidth=linewidth,
band_color=band_color
)
if legend:
self._add_legend(
ax,
names=[f'{i[0]}({i[1]})' for i in zip(element_symbols_long, orbital_symbols_long)],
colors=colors
)
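    # Usage sketch: map each element symbol to a list of orbital indices ('In'/'As' are
    # placeholder symbols):
    #     band.plot_element_orbitals(ax=ax, element_orbital_dict={'In': [0], 'As': [1, 2, 3]}, scale_factor=20)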
def plot_element_spd(self, ax, element_spd_dict, scale_factor=5, erange=[-6,6], display_order=None, color_list=None, legend=True, linewidth=0.75, band_color='black'):
"""
This function plots the projected band structure on the s, p, and d orbitals for each specified element in the calculated structure.
Parameters:
ax (matplotlib.pyplot.axis): Axis to plot the data on
            element_spd_dict (dict[str][str]): Dictionary that maps element symbols to the s, p, and/or d
                orbitals to project onto. Format: {element symbol: 'spd', ...}
            display_order (None or str): The available options are None, 'all', and 'dominant', where None
                plots the scatter points in the order given in the element_spd_dict, 'all' plots the
                scatter points from largest to smallest so all points are visible, and 'dominant' plots
                the scatter points from smallest to largest so only the dominant color is visible.
            scale_factor (float): Factor to scale weights. This changes the size of the
                points in the scatter plot
            color_list (list): List of colors of the same length as the total number of selected orbitals
            legend (bool): Determines if the legend should be included or not.
            linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
"""
element_symbols = list(element_spd_dict.keys())
orbital_symbols = list(element_spd_dict.values())
number_orbitals = [len(i) for i in orbital_symbols]
element_symbols_long = np.repeat(element_symbols, number_orbitals)
element_indices = np.repeat(range(len(element_symbols)), number_orbitals)
orbital_symbols_long = np.hstack([[o for o in orb] for orb in orbital_symbols])
orbital_indices = np.hstack([[self.spd_relations[o] for o in orb] for orb in orbital_symbols])
indices = np.vstack([element_indices, orbital_indices]).T
projected_data = self._sum_elements(elements=element_symbols, spd=True)
projected_data = np.transpose(np.array([
projected_data[:,:,ind[0],ind[1]] for ind in indices
]), axes=(1,2,0))
if color_list is None:
colors = np.array([self.color_dict[i] for i in range(len(orbital_symbols_long))])
else:
colors = color_list
self._plot_projected_general(
ax=ax,
projected_data=projected_data,
colors=colors,
scale_factor=scale_factor,
erange=erange,
display_order=display_order,
linewidth=linewidth,
band_color=band_color
)
if legend:
self._add_legend(
ax,
names=[f'{i[0]}({i[1]})' for i in zip(element_symbols_long, orbital_symbols_long)],
colors=colors
)
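    # Usage sketch using the element_spd_dict format from the docstring ('In'/'As' are
    # placeholder symbols):
    #     band.plot_element_spd(ax=ax, element_spd_dict={'In': 'spd', 'As': 'sp'}, scale_factor=20)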
if __name__ == "__main__":
M = [[-1,1,0],[-1,-1,1],[0,0,1]]
high_symm_points = [
[0.5, 0.5, 0],
[0.0, 0.0, 0],
[0.5, 0.5, 0]
]
band = Band(
folder="../../vaspvis_data/band-unfold",
projected=True,
unfold=True,
kpath='XGX',
high_symm_points=high_symm_points,
n=30,
M=M,
)
fig, ax = plt.subplots(figsize=(3,4), dpi=300)
start = time.time()
# band.plot_plain(ax=ax, color=[(0.9,0.9,0.9)])
# band.plot_spd(ax=ax, orbitals='sd', display_order='all', scale_factor=35, erange=[-5,0])
# band.plot_orbitals(ax=ax, scale_factor=35, orbitals=range(8), display_order=None)
band.plot_atoms(
ax=ax,
atoms=[0,1],
display_order='dominant',
scale_factor=20,
erange=[-5,0],
)
# ax.set_aspect(3, adjustable='datalim')
end = time.time()
print(end-start)
ax.set_ylabel('$E - E_{F}$ $(eV)$', fontsize=6)
ax.tick_params(labelsize=6, length=2.5)
ax.tick_params(axis='x', length=0)
ax.set_ylim(-5,0)
plt.tight_layout(pad=0.2)
plt.savefig('unfold_spd_dominant.png')
| python |
#!/usr/bin/env python
# mainly taken from https://github.com/rochaporto/collectd-openstack
import collectd
import datetime
import traceback
class Base(object):
def __init__(self):
self.username = 'admin'
self.password = 'admin'
self.verbose = False
self.debug = False
self.prefix = ''
self.instance = 'localhost'
self.interval = 10.0
def config_callback(self, conf):
"""Takes a collectd conf object and fills in the local config."""
for node in conf.children:
if node.key == "Username":
self.username = node.values[0]
elif node.key == "Password":
self.password = node.values[0]
elif node.key == "Instance":
self.instance = node.values[0]
elif node.key == "Verbose":
if node.values[0] in ['True', 'true']:
self.verbose = True
elif node.key == "Debug":
if node.values[0] in ['True', 'true']:
self.debug = True
elif node.key == "Prefix":
self.prefix = node.values[0]
elif node.key == 'Interval':
self.interval = float(node.values[0])
else:
collectd.warning("{}: unknown config key: {}".format(self.prefix, node.key))
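    # A collectd Python-plugin <Module> block matching the keys parsed above might look
    # like this (the module name "openstack_plugin" is only a placeholder):
    #     <Module "openstack_plugin">
    #         Username "admin"
    #         Password "secret"
    #         Instance "controller-1"
    #         Verbose "true"
    #         Interval "30"
    #     </Module>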
def dispatch(self, stats):
"""
Dispatches the given stats.
stats should be something like:
{'plugin': {'plugin_instance': {'type': {'type_instance': <value>, ...}}}}
"""
if not stats:
collectd.error("{}: failed to retrieve stats".format(self.prefix))
return
self.logdebug("dispatching {} new stats :: {}".format(len(stats), stats))
try:
for plugin in stats.keys():
for plugin_instance in stats[plugin].keys():
for group in stats[plugin][plugin_instance].keys():
group_value = stats[plugin][plugin_instance][group]
if not isinstance(group_value, dict):
self.dispatch_value(plugin, plugin_instance, group, None, group_value)
else:
for type_instance in stats[plugin][plugin_instance][group].keys():
self.dispatch_value(plugin, plugin_instance,
group, type_instance,
stats[plugin][plugin_instance][group][type_instance])
except Exception as exc:
collectd.error("{}: failed to dispatch values :: {} :: {}".format(self.prefix, exc,
traceback.format_exc()))
def dispatch_value(self, plugin, plugin_instance, group, type_instance, value):
"""Looks for the given stat in stats, and dispatches it"""
self.logdebug("dispatching value {}.{}.{}.{}={}".format(plugin, plugin_instance,
group, type_instance, value))
val = collectd.Values(type='gauge')
val.plugin = plugin
val.plugin_instance = plugin_instance
# the documentation says it must be initialized with a valid type from
# the types.db, but it works also with any other string and is easier
# to "group" by this in Grafana
# maybe this fails for other databases than InfluxDB? then revert back to
# val.type_instance="{}-{}".format(group, type_instance)
if type_instance is not None:
val.type_instance = "{}-{}".format(group, type_instance)
#val.type = group
#val.type_instance = type_instance
else:
val.type_instance = group
val.values = [value]
val.interval = self.interval
val.dispatch()
self.logdebug("sent metric {}.{}.{}.{}.{}".format(plugin, plugin_instance,
group, type_instance, value))
    def read_callback(self):
        try:
            start = datetime.datetime.now()
            stats = self.get_stats()
            self.logverbose("collected new data from service :: took {} seconds".format(
                (datetime.datetime.now() - start).seconds))
        except Exception as exc:
            collectd.error("{}: failed to get stats :: {} :: {}".format(self.prefix, exc,
                                                                        traceback.format_exc()))
            return
        self.dispatch(stats)
def get_stats(self):
collectd.error('Not implemented, should be subclassed')
def logverbose(self, msg):
if self.verbose:
collectd.info("{}: {}".format(self.prefix, msg))
def logdebug(self, msg):
if self.debug:
collectd.info("{}: {}".format(self.prefix, msg))
| python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
import cv2
from utils.misc import get_center
Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])
def get_gauss_filter_weight(width, height, mu_x, mu_y, sigma=7):
xy = np.indices((height,width))
x = xy[1,:,:]
y = xy[0,:,:]
    psf = np.exp(-(((x-mu_x)**2+(y-mu_y)**2)/(2*sigma**2)))  # unnormalized 2D Gaussian centered at (mu_x, mu_y)
return psf
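# Usage sketch: a 225x225 soft label peaked at the image center (unnormalized, max value 1):
#     label = get_gauss_filter_weight(225, 225, mu_x=112, mu_y=112, sigma=7)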
def get_template_correlation_response(im_size=225, out_size=None):
# out_size = [width, height]
# output = [H,W]
gauss_response = get_gauss_filter_weight(im_size, im_size, im_size//2, im_size//2)
if out_size is not None:
gauss_response = cv2.resize(gauss_response, tuple(out_size))
return gauss_response
def batch_fft2d(inputs, transpose=True):
# inputs: [B,H,W,C]
if inputs.dtype != tf.complex64:
inputs = tf.cast(inputs, tf.complex64)
if transpose:
inputs = tf.transpose(inputs, [0,3,1,2])
outputs = tf.fft2d(inputs) # [B,C,H,W]
if transpose:
outputs = tf.transpose(outputs, [0,2,3,1]) # [B,H,W,C]
return outputs
def batch_ifft2d(inputs, transpose=True):
# inputs: [B,H,W,C]
if transpose:
inputs = tf.transpose(inputs, [0,3,1,2])
outputs = tf.ifft2d(inputs)
if transpose:
outputs = tf.transpose(outputs, [0,2,3,1]) # [B,H,W,C]
return outputs
def get_cx(rect):
return (rect[0]+rect[2])*0.5
def get_cy(rect):
return (rect[1]+rect[3])*0.5
def get_width(rect):
return (rect[2]-rect[0])
def get_height(rect):
return (rect[3]-rect[1])
def get_area(rect):
return (rect[2]-rect[0]) * (rect[3]-rect[1])
def get_intersection(rect1, rect2):
x1 = max(rect1[0], rect2[0])
y1 = max(rect1[1], rect2[1])
x2 = min(rect1[2], rect2[2])
y2 = min(rect1[3], rect2[3])
return np.array([x1,y1,x2,y2], dtype=rect1.dtype)
def get_IoU(rect1, rect2):
inter = get_intersection(rect1, rect2)
area1 = get_area(rect1)
area2 = get_area(rect2)
area_I = get_area(inter)
IoU = float(area_I) / float(area1 + area2 - area_I)
return IoU
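# Worked example with corner-format boxes [x1, y1, x2, y2]: two 10x10 boxes shifted by
# 5 px in x and y intersect in a 5x5 patch, so IoU = 25 / (100 + 100 - 25) ~= 0.143:
#     get_IoU(np.array([0, 0, 10, 10]), np.array([5, 5, 15, 15]))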
def im2rgb(im):
if len(im.shape) != 3:
im = np.stack([im, im, im], -1)
return im
def convert_bbox_format(bbox, to):
x, y, target_width, target_height = bbox.x, bbox.y, bbox.width, bbox.height
if to == 'top-left-based':
x -= get_center(target_width)
y -= get_center(target_height)
elif to == 'center-based':
y += get_center(target_height)
x += get_center(target_width)
else:
raise ValueError("Bbox format: {} was not recognized".format(to))
return Rectangle(x, y, target_width, target_height)
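# Usage sketch: the tracker's Rectangle uses (x, y, width, height); converting to
# 'top-left-based' subtracts get_center(width)/get_center(height) from a center-based (x, y):
#     tl_bbox = convert_bbox_format(Rectangle(x=100, y=80, width=40, height=30), 'top-left-based')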
def get_exemplar_images(images, exemplar_size, targets_pos=None):
"""Crop exemplar image from input images"""
with tf.name_scope('get_exemplar_image'):
batch_size, x_height, x_width = images.get_shape().as_list()[:3]
z_height, z_width = exemplar_size
if targets_pos is None:
# crop from the center
target_pos_single = [[get_center(x_height), get_center(x_width)]]
targets_pos_ = tf.tile(target_pos_single, [batch_size, 1])
else:
targets_pos_ = targets_pos
# convert to top-left corner based coordinates
top = tf.to_int32(tf.round(targets_pos_[:, 0] - get_center(z_height)))
bottom = tf.to_int32(top + z_height)
left = tf.to_int32(tf.round(targets_pos_[:, 1] - get_center(z_width)))
right = tf.to_int32(left + z_width)
def _slice(x):
f, t, l, b, r = x
c = f[t:b, l:r]
return c
exemplar_img = tf.map_fn(_slice, (images, top, left, bottom, right), dtype=images.dtype)
exemplar_img.set_shape([batch_size, z_height, z_width, 3])
return exemplar_img
def get_crops(im, bbox, size_z, size_x, context_amount):
"""Obtain image sub-window, padding with avg channel if area goes outside of border
Adapted from https://github.com/bertinetto/siamese-fc/blob/master/ILSVRC15-curation/save_crops.m#L46
Args:
im: Image ndarray
bbox: Named tuple (x, y, width, height) x, y corresponds to the crops center
size_z: Target + context size
size_x: The resultant crop size
context_amount: The amount of context
Returns:
image crop: Image ndarray
"""
cy, cx, h, w = bbox.y, bbox.x, bbox.height, bbox.width
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = size_z / s_z
d_search = (size_x - size_z) / 2
pad = d_search / scale_z
s_x = s_z + 2 * pad
scale_x = size_x / s_x
image_crop_x, _, _, _, _ = get_subwindow_avg(im, [cy, cx],
[size_x, size_x],
[np.round(s_x), np.round(s_x)])
return image_crop_x, scale_x
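# Usage sketch with common SiamFC-style sizes (these particular values are an assumption,
# not fixed by this module): crop a 255x255 search region around a center-based bbox with
# context_amount=0.5 of the target perimeter margin; scale_x maps original pixels to crop pixels.
#     search_img, scale_x = get_crops(frame, Rectangle(x=cx, y=cy, width=w, height=h),
#                                     size_z=127, size_x=255, context_amount=0.5)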
def get_subwindow_avg(im, pos, model_sz, original_sz):
# avg_chans = np.mean(im, axis=(0, 1)) # This version is 3x slower
avg_chans = [np.mean(im[:, :, 0]), np.mean(im[:, :, 1]), np.mean(im[:, :, 2])]
if not original_sz:
original_sz = model_sz
sz = original_sz
im_sz = im.shape
# make sure the size is not too small
assert im_sz[0] > 2 and im_sz[1] > 2
c = [get_center(s) for s in sz]
# check out-of-bounds coordinates, and set them to avg_chans
context_xmin = np.int(np.round(pos[1] - c[1]))
context_xmax = np.int(context_xmin + sz[1] - 1)
context_ymin = np.int(np.round(pos[0] - c[0]))
context_ymax = np.int(context_ymin + sz[0] - 1)
left_pad = np.int(np.maximum(0, -context_xmin))
top_pad = np.int(np.maximum(0, -context_ymin))
right_pad = np.int(np.maximum(0, context_xmax - im_sz[1] + 1))
bottom_pad = np.int(np.maximum(0, context_ymax - im_sz[0] + 1))
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
if top_pad > 0 or bottom_pad > 0 or left_pad > 0 or right_pad > 0:
R = np.pad(im[:, :, 0], ((top_pad, bottom_pad), (left_pad, right_pad)),
'constant', constant_values=(avg_chans[0]))
G = np.pad(im[:, :, 1], ((top_pad, bottom_pad), (left_pad, right_pad)),
'constant', constant_values=(avg_chans[1]))
B = np.pad(im[:, :, 2], ((top_pad, bottom_pad), (left_pad, right_pad)),
'constant', constant_values=(avg_chans[2]))
im = np.stack((R, G, B), axis=2)
im_patch_original = im[context_ymin:context_ymax + 1,
context_xmin:context_xmax + 1, :]
if not (model_sz[0] == original_sz[0] and model_sz[1] == original_sz[1]):
im_patch = cv2.resize(im_patch_original, tuple(model_sz))
else:
im_patch = im_patch_original
return im_patch, left_pad, top_pad, right_pad, bottom_pad
def normalize_01(inputs):
# inputs: [B,H,W,C], tf.float32
mins = tf.reduce_min(inputs, axis=[1,2,3], keep_dims=True)
maxs = tf.reduce_max(inputs, axis=[1,2,3], keep_dims=True)
outputs = (inputs - mins) / (maxs-mins+1e-6)
return outputs
def spatial_softmax(logits):
shape = tf.shape(logits)
flatten = tf.layers.flatten(logits)
softmax = tf.nn.softmax(flatten)
softmax = tf.reshape(softmax, shape)
return softmax
def detect_hard_peak_position(inputs):
# inputs: [B,H,W,1] filter responses
# This function is non-differentiable
# Return: peak positions ([B,2] x,y coordinates, tf.int32)
batch_size, height, width, channels = tf.unstack(tf.shape(inputs))
inputs_flat = tf.layers.flatten(inputs) # [B, H*W]
argmax_inds = tf.argmax(inputs_flat, axis=1, output_type=tf.int32)
argmax_x = tf.cast(tf.mod(argmax_inds, width), tf.int32)
argmax_y = tf.cast(tf.divide(argmax_inds, width), tf.int32)
peak_pos = tf.concat([argmax_x[:,None], argmax_y[:,None]], axis=1) # [B,2]
return peak_pos
| python |
import matplotlib.pyplot as plt
import numpy as np
import wave
import sys
import statistics
########################################
# INPUT PARAMETER #
########################################
bpm = 136
offsetms = 0 # offset for the beat grid in ms. Moves the beat grid to the right and is used to align it with the recording. Set to 0 to have the program search for the correct offset!
threshold = 5000 # signal threshold that needs to be exceeded to detect a note
deadzone = 0 # deadzone in per cent of the subdivision. Beats within this percentage of the beginning and end of a grid cell are ignored
subdiv = 4 # 1 for quarter notes, 2 for eights, 3 for triplets and so on
cappeaks = True # This can be helpful if the waveform of some sounds has its max value further in the back and not just at the beginning of the onset.
sigma16threshold = 1 # threshold standard deviation for offset finding algorithm in 16th notes
muthreshold = 0.6 # threshold for offset finding algorithm in ms
########################################
def timealyze(bpm, offsetms, threshold, deadzone, subdiv, cappeaks):
file = "output.wav"
spf = wave.open(file, "r")
# Extract Raw Audio from Wav File
signal = spf.readframes(-1)
signal = np.frombuffer(signal, "Int16")
fs = spf.getframerate()
# If Stereo
if spf.getnchannels() == 2:
print("Just mono files")
sys.exit(0)
time = np.linspace(0, len(signal) / fs, num=len(signal))
# normalize signal (didn't work properly, only looked at positive peaks)
maxvalue = np.argmax(signal)
minvalue = np.argmin(signal)
signal = signal / signal[maxvalue] * 2**16 *0.5 #set factor to 0.5 for clean normalization.
# cap peaks
if cappeaks == True:
signal[signal > (signal[maxvalue]*0.7)] = signal[maxvalue]*0.7
deadzonems = deadzone * 60 / bpm / subdiv / 100 *1000 # deadzone in ms after and before grid boundary in which a beat cannot be detected. Prevents beat detection through maximum value at beginning of grid due to crash cymbal noise from beat before
offset = int(offsetms*fs/1000) #offset for beat grid in samples. Used for internal calculations below
deadzone = (deadzonems*fs/1000)
gw = int(fs*60/bpm/subdiv)
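    # Example (assuming a 44.1 kHz mono recording, which this script does not enforce):
    # at 136 bpm with subdiv=4 one grid cell spans 60/136/4 ~= 0.110 s, so gw ~= 4864 samples.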
#initialize mu and sigma 16
mu = -1
sigma16 = 1
def analyze(offset, mu):
beatindex = []
gridindex = []
targetindex = []
targetdiff = []
xlate = []
xearly = []
targetdiff16 = []
targetdiffms = []
deadzonefill = []
# test out peak finding and filtering
#peaks, _ = find_peaks(signal, height = 5000, prominence = 5000, distance = 1000)
# filter signal
#signal = savgol_filter(signal, 51, 3) # window size 51, polynomial order 3
# apply time grid (based on click tempo and sub division) on wave signal, identify beats and compare to perfectly timed beat
for i in range(int(len(signal)/gw-(offset/gw))):
signalfreg = signal[(gw*(i)+offset):(gw*(i+1)+offset)] # create signal fregment from wave signal according to time grid
gridindex.append((gw*i+offset)/fs) # create list with grid boundaries to plot grid
deadzonefill.append(np.arange(gridindex[i]-deadzonems/1000, gridindex[i]+deadzonems/1000, 0.001))
maxvalue = np.argmax(signalfreg)
            if signalfreg[maxvalue] > threshold and (maxvalue > deadzone) and ((gw - maxvalue) > deadzone): # threshold check plus deadzone check (in samples) relative to the start and end of the current grid cell; prevents peaks in signal noise from being recognized as beats in grid segments without notes
beatindex.append((maxvalue+(gw*i)+offset)/fs) # add index value of recognized beat (converted to seconds)
targetindex.append((gw *i+offset+int(gw/2))/fs) # add index value for perfectly timed beat for comparison#
# fill lists with x values to color fill the area between the perfectly timed and actual beats (late and early seperated to have different colors)
targetdiff.append(beatindex[-1]-targetindex[-1])
if targetdiff[-1] <= 0:
xearly.append(np.arange(beatindex[-1], targetindex[-1], 0.001))
if targetdiff[-1] > 0:
xlate.append(np.arange(targetindex[-1], beatindex[-1], 0.001))
# convert peaks for time in seconds
##peakssec = []
##for i in range(len(peaks)):
## peakssec.append(peaks[i]/fs)
##
##ax1.plot(peakssec, signal[peaks], "x")
# convert targetdiff to percentage of 16th notes
for i in range(len(targetdiff)):
targetdiff16.append(targetdiff[i]* bpm/60*4)
# convert targetdiff to milli seconds
for i in range(len(targetdiff)):
targetdiffms.append(targetdiff[i] * 1000)
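        # Worked example of the conversions above (at 136 bpm): a beat landing 0.055 s after
        # its target is 0.055 * 136/60 * 4 ~= 0.5 sixteenth notes late, i.e. 55 ms in targetdiffms.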
sigma16 = statistics.stdev(targetdiff16)
mu = statistics.mean(targetdiffms)
return offset, mu, sigma16, beatindex, gridindex, targetindex, xlate, xearly, targetdiff16, targetdiffms, deadzonefill
###########################################################################################################################
if offsetms == 0:
# create loop to fit offset such that the mean deviation is minimized (the analysis focusses on relative timing rather than absolute timing because there is no absolute timing reference available (e.g. a click))
# simple and ugly brute force while loop without exit condition, fix later
while abs(mu) > muthreshold or sigma16 > sigma16threshold :
try:
                offset = int(offsetms*fs/1000) # offset for the beat grid in samples, used for internal calculations below
offset, mu, sigma16, beatindex, gridindex, targetindex, xlate, xearly, targetdiff16, targetdiffms, deadzonefill = analyze(offset, mu)
if abs(mu) > 10:
offsetms +=10
else:
offsetms += 1
print("the offset is {} ms, mu is {} ms and sigma16 is {}".format(round(offsetms,1), round(mu, 2),round(sigma16,2)))
except:
offsetms += 100
continue
offsetms -=1
###############################################################################################################################
#########################
offset, mu, sigma16, beatindex, gridindex, targetindex, xlate, xearly, targetdiff16, targetdiffms, deadzonefill = analyze(offset, mu)
#########################
print("\n")
print("The offset was set to {} ms".format(offsetms))
print("\n")
print ("Im Durchschnitt liegst du {} 16tel neben dem Beat".format(round(sigma16,2)))
print ("Das sind etwa {} ms".format(round(sigma16/4/bpm*60*1000,2)))
print ("\n")
print("Insgesamt wurden {} Schläge detektiert".format(len(beatindex)))
print ("Die Durchschnitssabweichung liegt bei {} ms. Passe den Offset an, um diesen Wert so nahe wie möglich gegen 0 einzustellen".format(round(mu)))
fig = plt.figure(figsize=(16,9))
ax1 = fig.add_subplot(311)
    ax1.set_ylabel('Signal level')
    ax1.set_title("timealyze - timing analysis")
ax1.plot(time,signal)
ax2 = fig.add_subplot(312,sharex = ax1)
    ax2.set_xlabel('Time [s]')
ax1.hlines(threshold, 0, max(time), colors='gray', linestyles='dotted', label='Threshold', linewidth = 0.2)
for i in range(len(gridindex)):
ax1.axvline(x=gridindex[i], color = "gray", linestyle = 'dotted', linewidth = 0.6)
ax2.axvline(x=gridindex[i], color = "darkgray", linestyle = 'dotted', linewidth = 0.6)
for i in range(len(beatindex)):
ax1.axvline(x=beatindex[i], color = "orange", linewidth = 0.5)
ax2.axvline(x=beatindex[i], color = "black", linewidth = 0.8)
for i in range(len(targetindex)):
ax2.axvline(targetindex[i], color = "dimgray", linewidth = 0.8)
for i in range (len(xlate)):
ax2.fill_between(xlate[i], 10, -10, facecolor ='red')
for i in range (len(xearly)):
ax2.fill_between(xearly[i], 10, -10, facecolor ='orangered')
for i in range(len(deadzonefill)):
ax1.fill_between(deadzonefill[i], -2**16, 2**16, facecolor ='lightgray', alpha = 0.5)
ax1.set_ylim([-2**16/2, 2**16/2])
ax2.set_ylim([0,1])
mu16 = statistics.mean(targetdiff16)
mums = statistics.mean(targetdiffms)
sigmams = statistics.stdev(targetdiffms)
num_bins = 20
ax3 = fig.add_subplot(337)
n, bins, patches = ax3.hist(targetdiffms, num_bins, density=1)
ax3.set_xlabel("Abweichung vom Sollbeat in ms")
ax3.set_ylabel("Relative Häufigkeit")
ax3.grid(color = 'gray', linestyle = 'dotted', linewidth = 0.3)
y1 = ((1 / (np.sqrt(2 * np.pi) * sigmams)) *
np.exp(-0.5 * (1 / sigmams * (bins - mums))**2))
ax3.plot(bins, y1, '--')
ax3.set_xlim([-1/4*60/bpm*1000,1/4*60/bpm*1000])
ax4 = fig.add_subplot(338)
ax4.hist(targetdiff16, num_bins)
ax4.set_xlabel("Abweichung vom Sollbeat [in 16tel Noten]")
ax4.set_ylabel("Anzahl detektierte Schläge")
ax4.grid(color = 'gray', linestyle = 'dotted', linewidth = 0.3)
ax4.set_xlim([-1,1])
plt.text(0.68, 0.30, "Durchschnittliche Abweichung vom Beat:".format(round(sigma16/4/bpm*60*1000,2)), fontsize=10, transform=plt.gcf().transFigure)
plt.text(0.68, 0.26, " {} ms ".format(round(sigma16/4/bpm*60*1000,2)), fontsize=14, transform=plt.gcf().transFigure)
plt.text(0.68, 0.21, "Das entspricht: ".format(round(sigma16/4/bpm*60*1000,2)), fontsize=10, transform=plt.gcf().transFigure)
plt.text(0.68, 0.17, " {} 16tel-Noten ".format(round(sigma16,2)), fontsize=14, transform=plt.gcf().transFigure)
plt.text(0.68, 0.11, "Verwendete Parametereinstellungen:", fontsize=6, transform=plt.gcf().transFigure)
plt.text(0.68, 0.09, "[BPM: {}], [Offset: {} ms], [Threshold: {}], [Subdiv: {}]".format(bpm, offsetms, threshold, subdiv), fontsize=6, transform=plt.gcf().transFigure)
plt.text(0.68, 0.075, "[CapPeaks: {}], [Sigma16Threshold: {}], [MuThreshold: {}]".format(cappeaks, sigma16threshold, muthreshold), fontsize=6, transform=plt.gcf().transFigure)
plt.text(0.95, 0.02, "v1.0.0", fontsize=6, transform=plt.gcf().transFigure)
#plt.tight_layout()
# Return value to show score in GUI
return sigmams, len(beatindex)
def showplot():
plt.show()
if __name__ == '__main__':
timealyze(bpm, offsetms, threshold, deadzone, subdiv, cappeaks)
showplot()
| python |