seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
---|---|---|---|---|---|---|---|---|---|---|---|---|
650432757
|
import os
import luigi
import json
import z5py
import numpy as np
from ..cluster_tasks import WorkflowBase
from ..relabel import RelabelWorkflow
from ..relabel import find_uniques as unique_tasks
from ..node_labels import NodeLabelWorkflow
from ..features import RegionFeaturesWorkflow
from .. import write as write_tasks
from . import size_filter_blocks as size_filter_tasks
from . import background_size_filter as bg_tasks
from . import filling_size_filter as filling_tasks
from . import filter_blocks as filter_tasks
from . import id_filter as id_tasks
from . import orphan_assignments as orphan_tasks
from . import graph_watershed_assignments as gws_tasks
from . import graph_connected_components as cc_tasks
class SizeFilterWorkflow(WorkflowBase):
input_path = luigi.Parameter()
input_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
size_threshold = luigi.IntParameter()
hmap_path = luigi.Parameter(default='')
hmap_key = luigi.Parameter(default='')
relabel = luigi.BoolParameter(default=True)
preserve_zeros = luigi.BoolParameter(default=False)
def _bg_filter(self, dep):
filter_task = getattr(bg_tasks,
self._get_task_name('BackgroundSizeFilter'))
dep = filter_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.input_path,
input_key=self.input_key,
output_path=self.output_path,
output_key=self.output_key,
dependency=dep)
return dep
def _ws_filter(self, dep):
filter_task = getattr(filling_tasks,
self._get_task_name('FillingSizeFilter'))
dep = filter_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.input_path,
input_key=self.input_key,
output_path=self.output_path,
output_key=self.output_key,
hmap_path=self.hmap_path,
hmap_key=self.hmap_key,
preserve_zeros=self.preserve_zeros,
dependency=dep)
return dep
def requires(self):
un_task = getattr(unique_tasks,
self._get_task_name('FindUniques'))
dep = un_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.input_path,
input_key=self.input_key,
return_counts=True,
dependency=self.dependency,
prefix='size_filter')
sf_task = getattr(size_filter_tasks,
self._get_task_name('SizeFilterBlocks'))
dep = sf_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.input_path,
input_key=self.input_key,
size_threshold=self.size_threshold,
dependency=dep)
if self.hmap_path == '' or self.hmap_path is None:
assert self.hmap_key == '' or self.hmap_key is None
dep = self._bg_filter(dep)
else:
assert self.hmap_key != ''
dep = self._ws_filter(dep)
if self.relabel:
dep = RelabelWorkflow(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
target=self.target,
input_path=self.output_path,
input_key=self.output_key,
assignment_path=self.output_path,
assignment_key='relabel_size_filter',
prefix='size_filter',
dependency=dep)
return dep
@staticmethod
def get_config():
configs = super(SizeFilterWorkflow, SizeFilterWorkflow).get_config()
configs.update({'size_filter_blocks': size_filter_tasks.SizeFilterBlocksLocal.default_task_config(),
'background_size_filter': bg_tasks.BackgroundSizeFilterLocal.default_task_config(),
'filling_size_filter': filling_tasks.FillingSizeFilterLocal.default_task_config(),
**RelabelWorkflow.get_config()})
return configs
class FilterLabelsWorkflow(WorkflowBase):
input_path = luigi.Parameter()
input_key = luigi.Parameter()
label_path = luigi.Parameter()
label_key = luigi.Parameter()
node_label_path = luigi.Parameter()
node_label_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
filter_labels = luigi.ListParameter()
def requires(self):
dep = NodeLabelWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
target=self.target, max_jobs=self.max_jobs,
ws_path=self.input_path, ws_key=self.input_key,
input_path=self.label_path, input_key=self.label_key,
output_path=self.node_label_path,
output_key=self.node_label_key,
prefix='filter_labels', max_overlap=True,
dependency=self.dependency)
id_task = getattr(id_tasks,
self._get_task_name('IdFilter'))
id_filter_path = os.path.join(self.output_path, 'filtered_ids.json')
dep = id_task(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
dependency=dep, max_jobs=self.max_jobs,
node_label_path=self.node_label_path,
node_label_key=self.node_label_key,
output_path=id_filter_path,
filter_labels=self.filter_labels)
filter_task = getattr(filter_tasks,
self._get_task_name('FilterBlocks'))
dep = filter_task(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
dependency=dep, max_jobs=self.max_jobs,
input_path=self.input_path, input_key=self.input_key,
filter_path=id_filter_path,
output_path=self.output_path, output_key=self.output_key)
return dep
@staticmethod
def get_config():
configs = super(FilterLabelsWorkflow, FilterLabelsWorkflow).get_config()
configs.update({'id_filter':
id_tasks.IdFilterLocal.default_task_config(),
'filter_blocks':
filter_tasks.FilterBlocksLocal.default_task_config(),
**NodeLabelWorkflow.get_config()})
return configs
class ApplyThreshold(luigi.Task):
feature_path = luigi.Parameter()
feature_key = luigi.Parameter()
out_path = luigi.Parameter()
threshold = luigi.FloatParameter()
threshold_mode = luigi.Parameter(default='less')
dependency = luigi.TaskParameter()
threshold_modes = ('less', 'greater', 'equal')
def requires(self):
return self.dependency
def run(self):
f = z5py.File(self.feature_path)
ds = f[self.feature_key]
feats = ds[:, 0]
assert self.threshold_mode in self.threshold_modes
if self.threshold_mode == 'less':
filter_ids = feats < self.threshold
elif self.threshold_mode == 'greater':
filter_ids = feats > self.threshold
elif self.threshold_mode == 'equal':
filter_ids = feats == self.threshold
filter_ids = np.where(filter_ids)[0].tolist()
with open(self.out_path, 'w') as f:
json.dump(filter_ids, f)
def output(self):
return luigi.LocalTarget(self.out_path)
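# Note (illustrative): ApplyThreshold writes a plain JSON list of the ids whose
# feature value passed the threshold, e.g. [3, 17, 42] (example values only).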
class FilterByThresholdWorkflow(WorkflowBase):
input_path = luigi.Parameter()
input_key = luigi.Parameter()
seg_in_path = luigi.Parameter()
seg_in_key = luigi.Parameter()
seg_out_path = luigi.Parameter()
seg_out_key = luigi.Parameter()
threshold = luigi.FloatParameter()
relabel = luigi.BoolParameter(default=True)
def requires(self):
# calculate the region features
feat_path = os.path.join(self.tmp_folder, 'reg_feats.n5')
feat_key = 'feats'
dep = RegionFeaturesWorkflow(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
target=self.target, config_dir=self.config_dir,
input_path=self.input_path, input_key=self.input_key,
labels_path=self.seg_in_path, labels_key=self.seg_in_key,
output_path=feat_path, output_key=feat_key)
# apply threshold to get the ids to filter out
id_filter_path = os.path.join(self.tmp_folder, 'filtered_ids.json')
dep = ApplyThreshold(feature_path=feat_path, feature_key=feat_key,
out_path=id_filter_path, threshold=self.threshold,
dependency=dep)
# filter all blocks
filter_task = getattr(filter_tasks,
self._get_task_name('FilterBlocks'))
dep = filter_task(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
dependency=dep, max_jobs=self.max_jobs,
input_path=self.seg_in_path, input_key=self.seg_in_key,
filter_path=id_filter_path,
output_path=self.seg_out_path, output_key=self.seg_out_key)
if self.relabel:
dep = RelabelWorkflow(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
target=self.target,
input_path=self.seg_out_path,
input_key=self.seg_out_key,
assignment_path=self.seg_out_path,
assignment_key='assignments/relabel_filter',
prefix='pp_filter_by_threshold',
dependency=dep)
return dep
@staticmethod
def get_config():
configs = super(FilterByThresholdWorkflow, FilterByThresholdWorkflow).get_config()
configs.update({'filter_blocks': filter_tasks.FilterBlocksLocal.default_task_config(),
**RegionFeaturesWorkflow.get_config()})
return configs
class FilterOrphansWorkflow(WorkflowBase):
graph_path = luigi.Parameter()
graph_key = luigi.Parameter()
path = luigi.Parameter()
segmentation_key = luigi.Parameter()
assignment_key = luigi.Parameter()
output_path = luigi.Parameter()
assignment_out_key = luigi.Parameter()
output_key = luigi.Parameter(default=None)
relabel = luigi.BoolParameter(default=False)
def requires(self):
assert False, "FIXME not debugged yet"
dep = self.dependency
orphan_task = getattr(orphan_tasks,
self._get_task_name('OrphanAssignments'))
dep = orphan_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir, dependency=dep,
graph_path=self.graph_path, graph_key=self.graph_key,
assignment_path=self.path, assignment_key=self.assignment_key,
output_path=self.path, output_key=self.assignment_out_key,
relabel=self.relabel)
if self.output_key is not None:
write_task = getattr(write_tasks,
self._get_task_name('Write'))
dep = write_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir, dependency=dep,
input_path=self.path, input_key=self.segmentation_key,
output_path=self.path, output_key=self.output_key,
assignment_path=self.output_path, assignment_key=self.assignment_out_key,
identifier='filter-orphans')
return dep
@staticmethod
def get_config():
configs = super(FilterOrphansWorkflow, FilterOrphansWorkflow).get_config()
configs.update({'orphan_assignments': orphan_tasks.OrphanAssignmentsLocal.default_task_config(),
'write': write_tasks.WriteLocal.default_task_config()})
return configs
class ConnectedComponentsWorkflow(WorkflowBase):
problem_path = luigi.Parameter()
graph_key = luigi.Parameter()
assignment_path = luigi.Parameter()
assignment_key = luigi.Parameter()
output_path = luigi.Parameter()
assignment_out_key = luigi.Parameter()
output_key = luigi.Parameter(default='')
path = luigi.Parameter(default='')
fragments_key = luigi.Parameter(default='')
def requires(self):
cc_task = getattr(cc_tasks,
self._get_task_name('GraphConnectedComponents'))
dep = cc_task(max_jobs=self.max_jobs, tmp_folder=self.tmp_folder,
config_dir=self.config_dir,
problem_path=self.problem_path,
graph_key=self.graph_key,
assignment_path=self.assignment_path,
assignment_key=self.assignment_key,
output_path=self.output_path,
output_key=self.assignment_out_key,
dependency=self.dependency)
if self.output_key != '':
write_task = getattr(write_tasks,
self._get_task_name('Write'))
assert self.fragments_key != '' and self.path != ''
dep = write_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir, dependency=dep,
input_path=self.path, input_key=self.fragments_key,
output_path=self.output_path, output_key=self.output_key,
assignment_path=self.output_path,
assignment_key=self.assignment_out_key,
identifier='graph-connected-components')
return dep
@staticmethod
def get_config():
configs = super(ConnectedComponentsWorkflow, ConnectedComponentsWorkflow).get_config()
configs.update({'graph_connected_components': cc_tasks.GraphConnectedComponentsLocal.default_task_config(),
'write': write_tasks.WriteLocal.default_task_config()})
return configs
class SizeFilterAndGraphWatershedWorkflow(WorkflowBase):
problem_path = luigi.Parameter()
graph_key = luigi.Parameter()
features_key = luigi.Parameter()
# path to the container holding segmentation, fragments and assignments
path = luigi.Parameter()
# key of the merged segmentation
segmentation_key = luigi.Parameter()
# key of the underlying fragments
fragments_key = luigi.Parameter(default='')
# key of the fragment-segment assignment
assignment_key = luigi.Parameter()
# the size filter threshold
size_threshold = luigi.IntParameter(default=None)
target_number = luigi.IntParameter(default=None)
relabel = luigi.BoolParameter(default=False)
from_costs = luigi.BoolParameter(default=False)
output_path = luigi.Parameter()
assignment_out_key = luigi.Parameter()
output_key = luigi.Parameter(default='')
def find_sizes(self, dep):
# find segments that should be merged according to the size filter
un_task = getattr(unique_tasks,
self._get_task_name('FindUniques'))
dep = un_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.path,
input_key=self.segmentation_key,
return_counts=True,
dependency=dep,
prefix='size-filter-and-graph-watershed')
sf_task = getattr(size_filter_tasks,
self._get_task_name('SizeFilterBlocks'))
dep = sf_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.path,
input_key=self.segmentation_key,
size_threshold=self.size_threshold,
target_number=self.target_number,
dependency=dep)
return dep
def requires(self):
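# exactly one of size_threshold and target_number must be given; the assert below enforces this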
assert (self.size_threshold is None) != (self.target_number is None)
dep = self.dependency
# find the sizes for all segments
dep = self.find_sizes(dep)
# run graph watershed to merge in all small segments
filter_path = os.path.join(self.tmp_folder, 'discard_ids.npy')
gws_task = getattr(gws_tasks,
self._get_task_name('GraphWatershedAssignments'))
dep = gws_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir, dependency=dep,
problem_path=self.problem_path, graph_key=self.graph_key,
features_key=self.features_key,
assignment_path=self.path, assignment_key=self.assignment_key,
output_path=self.output_path, output_key=self.assignment_out_key,
filter_nodes_path=filter_path,
relabel=self.relabel, from_costs=self.from_costs)
if self.output_key != '':
assert self.fragments_key != ''
write_task = getattr(write_tasks,
self._get_task_name('Write'))
dep = write_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir, dependency=dep,
input_path=self.path, input_key=self.fragments_key,
output_path=self.output_path, output_key=self.output_key,
assignment_path=self.output_path, assignment_key=self.assignment_out_key,
identifier='size-filter-graph-ws')
return dep
@staticmethod
def get_config():
configs = super(SizeFilterAndGraphWatershedWorkflow,
SizeFilterAndGraphWatershedWorkflow).get_config()
configs.update({'size_filter_blocks': size_filter_tasks.SizeFilterBlocksLocal.default_task_config(),
'graph_watershed_assignments':
gws_tasks.GraphWatershedAssignmentsLocal.default_task_config(),
'write': write_tasks.WriteLocal.default_task_config()})
return configs
| constantinpape/cluster_tools | cluster_tools/postprocess/postprocess_workflow.py | postprocess_workflow.py | py | 19,543 | python | en | code | 32 | github-code | 6 |
10176346880
|
import requests
import re
import warnings
import json
class NHGIS:
'''API wrapper for the IPUMS NHGIS API.
API Documentation: https://developer.ipums.org/docs/get-started/
Arguments:
api_key: Authorization key required for use of the IPUMS API. *Required*
API keys can be obtained here: https://account.ipums.org/api_keys
'''
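# Illustrative usage sketch (the API key and dataset/table names below are placeholder
# values, not taken from this file):
#   nhgis = NHGIS(api_key="YOUR_IPUMS_API_KEY")
#   all_datasets = nhgis.dataset_metadata()
#   one_table = nhgis.dataset_metadata(dataset="1990_STF1", data_table="NP1")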
def __init__(self, api_key):
NHGIS.header = {"Authorization": api_key}
self.documentation_link = 'https://developer.ipums.org/docs/get-started/'
self.geographic_extent_options = {"010": "Nation",
"020": "Region",
"030": "Division2",
"040": "State",
"050": "State-County",
"140": "State-County-Census Tract",
"155": "State-Place-County",
"160": "State-Place",
"250": "American Indian Area/Alaska Native Area Hawaiian Home Land",
"310": "Metropolitan Statistical Area/Micropolitan Statistical Area",
"500": "State-Congressional District"}
self.dataset = Dataset
self.time_series = TimeSeries
def dataset_metadata(self, dataset=None, data_table=None):
'''
Returns metadata for IPUMS datasets.
Arguments:
dataset: Default: None. If None, metadata for all available datasets is returned. If the name of
a dataset is given, metadata for the specified dataset is returned.
data_table: Default: None. Data table can only be specified when `dataset` != None.
If specified, metadata for the specified table is returned.
'''
if not dataset and not data_table:
url = "https://api.ipums.org/metadata/nhgis/datasets?version=v1"
elif dataset and not data_table:
url = f"https://api.ipums.org/metadata/nhgis/datasets/{dataset}?version=v1"
elif dataset and data_table:
url = f"https://api.ipums.org/metadata/nhgis/datasets/{dataset}/data_tables/{data_table}?version=v1"
elif not dataset and data_table:
raise ValueError('A dataset must be provided when data_table != None')
response = requests.get(url, headers=self.header)
return self._validate_response(response)
def time_series_metadata(self, data_table=None):
'''
Returns metadata for available time-series tables.
Arguments:
data_table: Default: None. If None, metadata for all data tables is returned.
If specified, metadata for the specified table is returned.
'''
if not data_table:
url = "https://api.ipums.org/metadata/nhgis/time_series_tables?version=v1"
else:
url = f"https://api.ipums.org/metadata/nhgis/time_series_tables/{data_table}?version=v1"
response = requests.get(url, headers=self.header)
return self._validate_response(response)
def shapefile_metadata(self):
'''
Returns metadata for available shapefiles.
Arguments:
No arguments are available for this method.
'''
url = "https://api.ipums.org/metadata/nhgis/shapefiles?version=v1"
response = requests.get(url, headers=self.header)
return self._validate_response(response)
def _validate_response(self, response):
if response.status_code == 200:
return response.json()
else:
raise ValueError(f'''A {response.status_code} error code was returned.\n
The following reason was given: {response.reason}
API Keys can be obtained here: https://account.ipums.org/api_keys
API documentation is here: {self.documentation_link}
If need be, api error code documentation can be found here
https://developer.mozilla.org/en-US/docs/Web/HTTP/Status''')
def _validate_shapefiles(self, shapefiles):
supported_files = [file['name'] for file in self.shapefile_metadata()]
for file in shapefiles:
if file not in supported_files:
raise ValueError(f'Shapefile: {file} could not be found.')
def _create_payload(self, datasets=None, time_series_tables=None, shapefiles=None,
data_format='csv_no_header', breakdown_and_data_type_layout='separate_files',
time_series_table_layout=None, geographic_extents=None, description=None):
'''
Receives Dataset class objects and returns a json payload formatted according to the specifications
of the IPUMS API.
'''
payload = {}
if datasets:
if isinstance(datasets, list):
payload['datasets'] = {}
for dataset in datasets:
if isinstance(dataset, Dataset):
if dataset.extent_required and not geographic_extents:
warnings.warn(f'''
Geographic extent is required for
Dataset {dataset.dataset} with the provided
geographic levels. It is recommended that this
extent be provided. By default, all geo extents are requested.
Geographic extent options can be accessed with the
NHGIS.geographic_extent_options attribute.''')
geographic_extents = ['*']
payload['datasets'].update(dataset.to_payload())
elif isinstance(dataset, dict):
payload['datasets'].update(dataset)
warnings.warn('''Validation is not provided for
dictionary inputs.
Use of NHGIS.dataset is recommended.''')
else:
raise ValueError(f'datasets cannot be datatype: {type(dataset)}')
if time_series_tables:
payload['time_series_tables'] = {}
if isinstance(time_series_tables, list):
for table in time_series_tables:
if isinstance(table, TimeSeries):
payload['time_series_tables'].update(table.to_payload())
elif isinstance(table, dict):
payload['time_series_tables'].update(table)
warnings.warn('''Validation is not provided for
dictionary inputs.
Use of NHGIS.time_series is recommended.''')
else:
raise ValueError(f'time_series_tables cannot be datatype: {type(table)}')
elif isinstance(time_series_tables, TimeSeries):
payload['time_series_tables'].update(time_series_tables.to_payload())
else:
raise ValueError('time_series_tables must be a list or a TimeSeries instance.')
if shapefiles:
payload['shapefiles'] = shapefiles
if time_series_tables:
payload['time_series_table_layout'] = time_series_table_layout
if geographic_extents:
payload['geographic_extents'] = geographic_extents
payload['data_format'] = data_format
if description:
payload['description'] = description
else:
payload['description'] = 'ipumspy extract'
if breakdown_and_data_type_layout:
payload['breakdown_and_data_type_layout'] = breakdown_and_data_type_layout
payload_json = json.dumps(payload)
payload_json = json.loads(payload_json)
return payload_json
def create_extract(self, datasets=None, time_series_tables=None, shapefiles=None,
data_format='csv_no_header', breakdown_and_data_type_layout='separate_files',
time_series_table_layout=None, geographic_extents=None, description=None):
'''
Submit a data extract request to the IPUMS NHGIS API.
Currently, the IPUMS API does not support downloading directly from the API.
An email notification will be received confirming your extract request.
'''
url = "https://api.ipums.org/extracts/?product=nhgis&version=v1"
if shapefiles:
self._validate_shapefiles(shapefiles)
payload = self._create_payload(datasets=datasets, time_series_tables=time_series_tables, shapefiles=shapefiles,
data_format=data_format, breakdown_and_data_type_layout=breakdown_and_data_type_layout,
time_series_table_layout=time_series_table_layout,
geographic_extents=geographic_extents, description=description)
result = requests.post(url, headers=self.header, json=payload).json()
if 'number' in result:
self.extract_number = result['number']
return result
def extract_status(self, status_only = True):
'''
Returns the status of the most recent data extract request.
'''
url = f"https://api.ipums.org/extracts/{self.extract_number}?product=nhgis&version=v1"
response = requests.get(url, headers=self.header)
if status_only:
return response.json()['status']
else:
return response.json()
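# Illustrative end-to-end flow (sketch; dataset, table and geography names are placeholders):
#   nhgis = NHGIS(api_key="YOUR_IPUMS_API_KEY")
#   ds = Dataset(dataset="1990_STF1", data_tables=["NP1"], geog_levels=["county"])
#   nhgis.create_extract(datasets=[ds], description="example extract")
#   nhgis.extract_status()  # e.g. 'queued', 'started' or 'completed'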
class Dataset(NHGIS):
'''A wrapper for creating and validating requests to the IPUMS NHGIS API.
This class is used to format the json data structure for the NHGIS class.'''
def __init__(self, dataset, data_tables, geog_levels, years=None, breakdowns=[]):
self._validate(dataset, data_tables, geog_levels, years=years, breakdowns=breakdowns)
self.dataset = dataset
self.data_tables = data_tables
self.geog_levels = geog_levels
self.breakdowns = breakdowns
def _validate(self, dataset, data_tables, geog_levels, years=None, breakdowns=[]):
self.validate_types(dataset, data_tables, geog_levels, years, breakdowns)
metadata = self.dataset_metadata(dataset=dataset)
self.validate_data_tables(metadata, data_tables)
self.validate_geog_levels(metadata, geog_levels)
self.validate_years(metadata, years)
self.validate_breakdowns(metadata, breakdowns)
def validate_data_tables(self, metadata, data_tables):
supported_tables = [value['name'] for value in metadata['data_tables']]
for table in data_tables:
if table not in supported_tables:
raise ValueError(f'''Data Table: {table} is not supported for dataset: {metadata["name"]}''')
def validate_geog_levels(self, metadata, geog_levels):
supported_levels = [value['name'] for value in metadata['geog_levels']]
for level in geog_levels:
if level not in supported_levels:
raise ValueError(f'''Geo level: {level} is not supported for dataset: {metadata["name"]}''')
self.extent_required = False
for level in metadata['geog_levels']:
if level['name'] in geog_levels:
if level['has_geog_extent_selection']:
warnings.warn(f"""
Geographic level: '{level['name']}' for Dataset: '{metadata['name']}'
requires geog_extent selection when extraction is made.
Available geographic extent options can be accessed with the
`NHGIS.geographic_extent_options` attribute.
The `NHGIS.create_extract` method has a default geog_extent of ['*']
""")
self.extent_required = True
def validate_breakdowns(self, metadata, breakdowns):
if breakdowns:
if not 'breakdowns' in metadata:
raise ValueError(f'Breakdowns are not supported for {metadata["name"]}')
supported_breakdowns_list = metadata['breakdowns']
supported_breakdowns = []
for entry in supported_breakdowns_list:
supported_breakdowns += [value['name'] for value in entry['breakdown_values']]
for breakdown in breakdowns:
if breakdown not in supported_breakdowns:
raise ValueError(f'''Breakdown: {breakdown} is not supported for dataset: {metadata["name"]}''')
def is_multiyear(self, metadata):
year_count = re.findall(r'(\d{4})', metadata['name'])
if year_count:
count = len(set(year_count))
else:
count = 1
if count > 1:
return True
def year_range(self, metadata):
years = re.findall(r'(\d{4})', metadata['name'])
if years:
years = [int(year) for year in years]
return [year for year in range(years[0], years[1] + 1)]
def validate_years(self, metadata, years):
multiyear = self.is_multiyear(metadata)
if multiyear and not years:
raise ValueError(f'{metadata["name"]} has multiple years. `years` cannot be set to `None`.')
if multiyear and years:
supported_years = self.year_range(metadata)
for year in years:
if year not in supported_years:
raise ValueError(f'''Year: {year} is not supported for dataset: {metadata["name"]}.''')
if not multiyear and years:
if len(years) > 1:
raise ValueError(f'Dataset: {metadata["name"]} is not a multi year dataset, but multiple years were given.')
supported_year = int(re.findall(r'(\d{4})', metadata["name"])[0])
if not supported_year == years[0]:
raise ValueError(f'Dataset: {metadata["name"]} supports the year {supported_year}, but {years[0]} was given.')
if not multiyear and not years:
years = []
self.years = years
def validate_types(self, dataset, data_tables, geog_levels, years, breakdowns):
if type(dataset) != str:
raise ValueError('dataset variable must be string.')
if not type(data_tables) == list:
raise ValueError('data_tables variable must be a list.')
if not all(isinstance(item, str) for item in data_tables):
raise ValueError('data_tables variable must be a list of strings.')
if not type(geog_levels) == list:
raise ValueError('geog_levels variable must be a list.')
if not all(isinstance(item, str) for item in geog_levels):
raise ValueError('geog_levels variable must be a list of strings.')
if years:
if type(years) != list:
raise ValueError('year variable must be a list for multi year datasets.')
if not all(isinstance(item, int) for item in years):
raise ValueError('year variable must be a list of integers for multi year datasets.')
if breakdowns:
if type(breakdowns) != list:
raise ValueError('If breakdowns != None, the variable must be a list.')
if not all(isinstance(item, str) for item in breakdowns):
raise ValueError('If breakdowns != None, the variable must be a list of strings.')
def __repr__(self):
return f'''Dataset(dataset: {self.dataset},
Number of tables: {len(self.data_tables)},
Number of geographies: {len(self.geog_levels)},
Number of breakdowns: {len(self.breakdowns) if self.breakdowns else self.breakdowns},
years: {self.years})'''
def to_payload(self):
payload = {self.dataset: {
"years": [str(year) for year in self.years] if self.years else [],
"breakdown_values": self.breakdowns,
"data_tables": self.data_tables,
"geog_levels": self.geog_levels
}}
return payload
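# For example (placeholder values), to_payload() yields an entry shaped like:
#   {"1990_STF1": {"years": [], "breakdown_values": [], "data_tables": ["NP1"], "geog_levels": ["county"]}}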
class TimeSeries(NHGIS):
'''A wrapper for creating and validating requests to the IPUMS NHGIS API.
This class is used to format the json data structure for the NHGIS class.'''
def __init__(self, data_table, geog_levels='macro'):
self.validate(data_table, geog_levels)
def validate(self, data_table, geog_levels):
self.validate_types(data_table, geog_levels)
metadata = self.time_series_metadata(data_table=data_table)
self.data_table = data_table
self.validate_geog_levels(metadata, geog_levels)
def validate_types(self, data_table, geog_levels):
if type(data_table) != str:
raise ValueError('`data_table` variable must be a string.')
if geog_levels != 'macro':
if type(geog_levels) != list:
raise ValueError('If `geog_levels` != "macro" `geog_levels` must be a list.')
if not all(isinstance(item, str) for item in geog_levels):
raise ValueError('If `geog_levels` != "macro" `geog_levels` must be a list of strings.')
def validate_geog_levels(self, metadata, geog_levels):
if geog_levels == 'macro':
self.geog_levels = [metadata['geog_levels'][0]['name']]
else:
supported_levels = [value['name'] for value in metadata['geog_levels']]
for level in geog_levels:
if level not in supported_levels:
raise ValueError(f'Time Series {metadata["name"]} does not support geo level: "{level}"')
self.geog_levels = geog_levels
def to_payload(self):
payload = {self.data_table: {
'geog_levels': self.geog_levels}}
return payload
| joelsewhere/ipumspy | ipumspy.py | ipumspy.py | py | 18,512 | python | en | code | 0 | github-code | 6 |
5736450663
|
# This program calculates the progressive discount for a simulated travel package
# Asking the user for the information needed to price the package
valor_bruto = float(input("Insira o valor bruto do pacote de viagem: "))
categoria = input("Insira o nome da categoria do pacote (Economica, executiva ou primeira classe): ")
numero_pessoas = int(input("Insira no número de pessoas no pacote (Lembrando que o desconto só é válido se as pessoas morarem na mesma casa): "))
# Final value predefined so the code does not break if the user enters an option that makes no sense
valor_final = valor_bruto
# Economy class condition
if categoria.upper() == "ECONOMICA":
if numero_pessoas == 2:
# 3% discount
valor_final = valor_bruto * 0.97
elif numero_pessoas == 3:
# 4% discount
valor_final = valor_bruto * 0.96
elif numero_pessoas >= 4:
# 5% discount
valor_final = valor_bruto * 0.95
else:
print("Por favor, insira um valor válido e tente novamente")
# Executive class condition
if categoria.upper() == "EXECUTIVA":
if numero_pessoas == 2:
# 5% discount
valor_final = valor_bruto * 0.95
elif numero_pessoas == 3:
# 7% discount
valor_final = valor_bruto * 0.93
elif numero_pessoas >= 4:
# 8% discount
valor_final = valor_bruto * 0.92
else:
print("Por favor, insira um valor válido e tente novamente")
# First class condition
if categoria.upper() == "PRIMEIRA CLASSE":
if numero_pessoas == 2:
# 10% discount
valor_final = valor_bruto * 0.9
elif numero_pessoas == 3:
# 15% discount
valor_final = valor_bruto * 0.85
elif numero_pessoas >= 4:
# 20% discount
valor_final = valor_bruto * 0.80
else:
print("Por favor, insira um valor válido e tente novamente")
# Showing the result to the user
print("Valor Bruto: {}".format(valor_bruto))
print("Valor Líquido: {}".format(valor_final))
# Calculating the average value per person
if numero_pessoas > 0:
valor_medio = valor_final/numero_pessoas
print("Valor médio por pessoa do pacote: {}".format(valor_medio))
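# Worked example (illustrative): a R$ 1000.00 package, "executiva" class, 3 people
# -> 7% discount: 1000 * 0.93 = 930.00; average per person: 930.00 / 3 = 310.00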
| pedrokli/estudos-python | agencia_viagem.py | agencia_viagem.py | py | 2,257 | python | pt | code | 0 | github-code | 6 |
10424289451
|
#-*- coding: utf-8 -*-
u"""
.. moduleauthor:: Martí Congost <[email protected]>
"""
from cocktail import schema
from woost.models import Configuration
from woost.extensions.audio.audiodecoder import AudioDecoder
from woost.extensions.audio.audioencoder import AudioEncoder
pos = Configuration.groups_order.index("media.images")
Configuration.groups_order.insert(pos + 1, "audio")
Configuration.members_order += ["audio_decoders", "audio_encoders"]
Configuration.add_member(
schema.Collection("audio_decoders",
items = schema.Reference(type = AudioDecoder),
related_end = schema.Reference(),
integral = True,
member_group = "media.audio"
)
)
Configuration.add_member(
schema.Collection("audio_encoders",
items = schema.Reference(type = AudioEncoder),
related_end = schema.Reference(),
integral = True,
member_group = "media.audio"
)
)
| marticongost/woost | woost/extensions/audio/configuration.py | configuration.py | py | 926 | python | en | code | 0 | github-code | 6 |
26053689790
|
from itertools import permutations
vowels = ["о", "а"]
consonants = ["в", "т", "р"]
result = set()
for index, i in enumerate(permutations("авторота")):
correct = True
for symbol_index in range(0, len(i) - 1):
if (i[symbol_index] in vowels and i[symbol_index + 1] in vowels) or \
(i[symbol_index] in consonants and i[symbol_index + 1] in consonants):
correct = False
break
if correct:
result.add(i)
print(len(result))
| Woolfer0097/UGE_IT | 8 task/236.py | 236.py | py | 501 | python | en | code | 0 | github-code | 6 |
14018247881
|
import cv2
import numpy as np
def motion_detector(videofile):
window_raw = "Raw video"
window_preprocessed = "Preprocessed video"
window_motion = "Video motion"
window_finished = "Thermal Video"
window_test1 = "Test1"
cv2.namedWindow(window_raw)
cv2.namedWindow(window_preprocessed)
cv2.namedWindow(window_motion)
cv2.namedWindow(window_finished)
cv2.namedWindow(window_test1)
cv2.moveWindow(window_raw, 0, 0)
cv2.moveWindow(window_preprocessed, 320, 0)
cv2.moveWindow(window_motion, 0, 265)
cv2.moveWindow(window_finished, 320, 265)
cv2.moveWindow(window_test1, 640, 0)
# Setup video windows so that they don't overlap
# Load video file
if videofile is None:
print("Could not find video file")
return
previous_frame = None
frame_width = int(videofile.get(3))
frame_height = int(videofile.get(4))
size = (frame_width, frame_height)
outer_bounds = [frame_width, 0, frame_height, 0] #[xmin, xmax, ymin, ymax]
result = cv2.VideoWriter('Results/Gas_detection.mp4',cv2.VideoWriter_fourcc(*'MP4V'), 18, size)
high_activity_areas = [outer_bounds]
activity_percentage = 0.8
activity_area_pixel_margin = 50
while True:
# 1. Load image
ret, frame = videofile.read()
if ret:
cv2.imshow(window_raw, frame)
# 2. Prepare image; grayscale and blur
prepared_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
prepared_frame = cv2.GaussianBlur(src=prepared_frame, ksize=(7,7), sigmaX=0)
cv2.imshow(window_preprocessed, prepared_frame)
# 3. Set previous frame and continue if there is None
if (previous_frame is None):
previous_frame = prepared_frame
continue
# calculate difference and update previous frame
diff_frame = cv2.absdiff(src1=previous_frame, src2=prepared_frame)
previous_frame = prepared_frame
# 4. Dilate the image a bit to make differences more visible; more suitable for contour detection
kernel = np.ones((1, 1))
diff_frame = cv2.dilate(diff_frame, kernel, 1)
# 5. Only keep areas that differ enough (threshold of 3 out of 255)
thresh_frame = cv2.threshold(src=diff_frame, thresh=3, maxval=255, type=cv2.THRESH_BINARY)[1]
cv2.imshow(window_motion, thresh_frame)
finished_frame = frame
contours, _ = cv2.findContours(image=thresh_frame, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
if cv2.contourArea(contour) < 5:
# too small: skip!
continue
(x, y, w, h) = cv2.boundingRect(contour)
cv2.rectangle(img=finished_frame, pt1=(x, y), pt2=(x + w, y + h), color=(0, 255, 0), thickness=2)
cv2.imshow(window_finished, finished_frame)
result.write(finished_frame)
#----------------------------------------------------------------------------
# for contour in contours:
# contour_placed_in_area = False
# if cv2.contourArea(contour) < 5:
# # too small: skip!
# continue
# [x, y, w, h] = cv2.boundingRect(contour)
# contour_border = [x, x+w, y, y+h]
# for area_border in high_activity_areas:
# # for i in range(0, 4):
# # if(abs(contour_border[i]-area_border[i])>activity_area_pixel_margin):
# # continue
# cont = cv2.drawContours(frame, area_border, -1, (255,0,0), 1)
# if(cv2.pointPolygonTest(cont, (x,y), True)):
# continue
# area_border = [min(area_border[0], contour_border[0]), max(area_border[1], contour_border[1]), min(area_border[2], contour_border[2]), max(area_border[3], contour_border[3])]
# cv2.rectangle(img=frame, pt1=(outer_bounds[0], outer_bounds[2]), pt2=(outer_bounds[1], outer_bounds[3]), color=(0, 0, 255), thickness=2)
# cv2.imshow(window_test1, frame)
#----------------------------------------------------------------------------
else:
break
# press escape to exit
if (cv2.waitKey(30) == 27):
return 0
cv2.destroyAllWindows()
# videofile.release()
# result.release()
return 1
# def main():
# cap = cv2.VideoCapture('/Users/MORFRE/Pictures/Mongstad/Flir dataset nov 2022/112ppm hydrogen/Leak/MOV_1669.mp4')
# motion_detector(cap)
| Christdej/gas-analysis | src/gas_analysis/gas_detection.py | gas_detection.py | py | 4,763 | python | en | code | null | github-code | 6 |
73996523069
|
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import random
import os
import glob
import sys
import wandb
import gru_models
import build_vocab
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# TODO: Get rid of these, just pass them in to dataset.
BATCH_SIZE=16
TRAIN_TOKEN_LEN=256
#from vocab_building import load_tokenized_file, load_vocab, decode_vocab, nlp, get_vocab_indx_vector
def find_files(path): return glob.glob(path)
class RNN_Dataset_multiple_sources(torch.utils.data.Dataset):
def __init__(
self,
sequence_length,
type
):
folder = "../vocabs_and_tokens/" + type + "/"
data_folder = "../data/" + type + "/"
vocab_file = folder + "*.pt"
token_files = folder + "*.pkl"
self.sequence_length = sequence_length
self.all_categories, self.n_categories = self.setup_categories(data_folder)
self.load_words(vocab_file, token_files)
self.uniq_words = len(self.vocab)
# data_folder needs to be like '../data/reviews/'
def setup_categories(self, data_folder):
all_categories = []
for filename in find_files(data_folder + '*.txt'):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
n_categories = len(all_categories)
if n_categories == 0:
raise RuntimeError('Data not found.')
print('# categories:', n_categories, all_categories)
#all_categories.remove('garden')
#all_categories.remove('music')
#all_categories.remove('small_combined')
#n_categories_languages = len(all_categories)
#print('# categories:', n_categories_languages, all_categories)
return all_categories, n_categories
def load_words(self, vocab_file, token_files):
# We want the vocab to be constructed from all sources, but we need the raw token sets for each separately.
# The category vector can just be a simple index vector.
self.vocab = build_vocab.load_vocab(find_files(vocab_file)[0])
token_files = find_files(token_files)
# This is only setup to handle two different categories right now
self.raw_tokens_1 = build_vocab.load_tokenized_file(token_files[0])
self.raw_tokens_2 = build_vocab.load_tokenized_file(token_files[1])
self.num_samples_1 = len(self.raw_tokens_1)
self.num_samples_2 = len(self.raw_tokens_2)
# This is iffy, because we aren't actually going through all of the "samples"
self.num_samples = max(1, ((self.num_samples_1 + self.num_samples_2) // TRAIN_TOKEN_LEN)) # Split raw tokens into groups of TRAIN_TOKEN_LEN
self.num_batches = max(1, self.num_samples // BATCH_SIZE)
print('Number of raw_tokens: ', len(self.raw_tokens_1 + self.raw_tokens_2))
print('Number of samples in a batch: ', self.num_samples)
print('Number of batches: ', self.num_batches)
return 1
def random_choice(self, l):
return l[random.randint(0, len(l)-1)]
def category_tensor(self, category):
li = self.all_categories.index(category)
if li == 0:
tensor = torch.zeros(self.sequence_length).to(device).long()
else:
tensor = torch.ones(self.sequence_length).to(device).long()
return tensor, li
def __len__(self):
return self.num_samples
def __getitem__(self, index):
# This should pick a random source, grab its category, and then grab a sequence associated with it.
# Pick random category
string_category= self.random_choice(self.all_categories)
category, category_index = self.category_tensor(string_category)
# Pick the right token samples based on the category
if category_index == 0:
current_sample = self.raw_tokens_1
else:
current_sample = self.raw_tokens_2
# We cut off the potential of it being too long
random_index = random.randint(0, len(current_sample) - (self.sequence_length + 1))
end_index = random_index + self.sequence_length
return ( # might break if it gets the very end?
torch.tensor(current_sample[random_index:end_index]).to(device), # x
torch.tensor(current_sample[random_index+1:end_index+1]).to(device), # y
category
)
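# Each item is a triple (x, y, category): x and y are token id tensors of length
# sequence_length, with y shifted one position ahead of x; category is an index
# tensor of the same length marking which source the sample came from.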
def train(dataset, model, max_epochs, batch_size, cat = False):
train_losses = []
model.train()
dataloader = DataLoader(dataset, batch_size=batch_size, drop_last=True)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
for epoch in range(max_epochs):
total_loss = 0
for batch, (x, y, category) in enumerate(dataloader):
hidden_states = model.init_hidden(batch_size)
#print('x size: ', x.size()) # 16, 256
#print('category size: ', category.size()) # 16, 256
optimizer.zero_grad()
if cat:
y_pred, hidden_states = model(x, hidden_states, batch_size, category)
else:
y_pred, hidden_states = model(x, hidden_states, batch_size)
#print('y_pred size: ', y_pred.size()) # [16, 4822] for cells, [16, 256, 4822] normal
#print('y_pred transposed size: ', y_pred.transpose(1, 2).size()) # [16, 4822, 256]
loss = criterion(y_pred.transpose(1, 2), y)
total_loss += loss.item()
loss.backward()
optimizer.step()
print({ 'epoch': epoch, 'batch': batch, 'loss': loss.item() })
wandb.log({"loss":loss.item()})
batch_loss = total_loss/batch_size
train_losses.append(batch_loss)
wandb.log({"batch_loss":batch_loss})
return train_losses
def predict_with_category(dataset, model, text, category, next_words=100):
model.eval()
prediction = build_vocab.get_vocab_indx_vector(dataset.vocab, build_vocab.load_spacy, text)
tokens = torch.tensor(prediction).to(device)
# Get category tensor
li = dataset.all_categories.index(category)
if li == 0:
category = torch.zeros(len(prediction)).to(device).long()
else:
category = torch.ones(len(prediction)).to(device).long()
print('cat size: ', category.size())
print('prediction size: ', tokens.size())
state_h = model.init_hidden(1) # num_layers, batch_size, lstm_size
# Prime generation by feeding in initial input:
for p in range(len(tokens)-1):
_, state_h = model(tokens[p].view(1,-1), state_h, 1, category[p].view(1,-1))
#print('state_h size: ', state_h.size())
last_token = tokens[-1]
for i in range(0, next_words):
y_pred, state_h = model(last_token.view(1,-1), state_h, 1, category[0].view(1,-1))
#print('y_pred size: ', y_pred.size()) # [16, 256, 12923], should be [1, 1, 12923]
#print('y_pred[0][-1] size: ', y_pred[0][-1].size())
last_word_logits = y_pred[0][-1]
# These are the probabilities
p = torch.nn.functional.softmax(last_word_logits, dim=0)
word_index = torch.multinomial(p, 1)[0]
top_values = torch.topk(p, 5)
#top_words = top_values.indices
#top_probs = top_values.values
#print('word index: ', word_index)
#print('top_words: ', top_words.tolist())
#top_word_pred = decode_vocab(dataset.vocab, [word_index])
#top_words_pred = decode_vocab(dataset.vocab, top_words.tolist())
#print('The top word predicted was: ', top_word_pred)
#print('The top five predictions were: ', top_words_pred)
#print('Their probabilites are: ', top_probs)
prediction.append(word_index)
last_token = torch.tensor([word_index]).to(device)
final_prediction = build_vocab.decode_vocab(dataset.vocab, prediction)
return final_prediction
def train_wrapper(type, hidden_size, num_epochs):
# Create dataset
dataset = RNN_Dataset_multiple_sources(TRAIN_TOKEN_LEN, type)
input_size = dataset.uniq_words # Should be size of vocab?
n_layers = 3
print('----------------------')
print('Original GRU')
run = wandb.init(name='Original GRU',
project='controllableRNN',
config={
'dataset':type,
'epochs':num_epochs,
'hidden_size':hidden_size
},
reinit=True
)
# Model with normal pytorch GRU
category_model = gru_models.GRU_category(input_size, hidden_size, input_size, n_layers).to(device)
file_path = f"gru_trained_cat_" + type + ".pt"
losses_cat = train(dataset, category_model, num_epochs, BATCH_SIZE, cat=True)
torch.save(category_model.state_dict(), file_path)
run.finish()
"""print('----------------------')
print('Original GRU with cells')
# Model with GRU Cells
cells_category_model = gru_models.GRU_with_cells_category(input_size, hidden_size, input_size, n_layers).to(device)
file_path = f"gru_trained_cat_cells_" + type + ".pt"
losses_cat_cells = train(dataset, cells_category_model, num_epochs, BATCH_SIZE, True)
torch.save(cells_category_model.state_dict(), file_path)"""
print('----------------------')
print('Edited GRU')
run = wandb.init(name='Edited GRU',
project='controllableRNN',
config={
'dataset':type,
'epochs':num_epochs,
'hidden_size':hidden_size
},
reinit=True
)
# Model with edited GRU Cells
cells_category_edited_model = gru_models.GRU_with_cells_category_edited(input_size, hidden_size, input_size, n_layers).to(device)
file_path = f"gru_trained_cat_cells_edited_" + type + ".pt"
losses_cat_cells_edited = train(dataset, cells_category_edited_model, num_epochs, BATCH_SIZE, True)
torch.save(cells_category_edited_model.state_dict(), file_path)
run.finish()
# Create loss graph and save
"""fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(len(losses_cat)), losses_cat, label="original")
#ax.plot(range(len(losses_cat_cells)), losses_cat_cells, label="original with cells")
ax.plot(range(len(losses_cat_cells_edited)), losses_cat_cells_edited, label="edited")
plt.title("Loss over time")
plt.xlabel("Time")
plt.ylabel("Loss")
plt.legend()
plt.savefig('loss_' + str(type) + "_" + str(num_epochs) + "_" + str(hidden_size) + '.png')"""
def main():
wandb.login() # login to wandb
# Uncomment these to use arguments
#arguments = sys.argv[1:]
#type, num_epochs, hidden_size = arguments
#num_epochs = int(num_epochs)
#hidden_size = int(hidden_size)
print('TRAINING LANGUAGES- HIDDEN_SIZE-256 NUM_EPOCHS-300')
train_wrapper(type='books', hidden_size=256, num_epochs=50)
print('TRAINING LANGUAGES- HIDDEN_SIZE-512 NUM_EPOCHS-300')
train_wrapper(type='books', hidden_size=512, num_epochs=50)
print('TRAINING LANGUAGES- HIDDEN_SIZE-1024 NUM_EPOCHS-300')
train_wrapper(type='books', hidden_size=1024, num_epochs=50)
if __name__ == "__main__":
main()
| JayOrten/controllableRNN | scripts/train_gru.py | train_gru.py | py | 11,462 | python | en | code | 0 | github-code | 6 |
20154269616
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["get_locations"]
from .geocode import geocode
from .ads import get_author_locations
def get_locations(name):
affils = get_author_locations(name)
if not len(affils):
return []
locations = []
for year, affiliation in affils:
if year is None:
continue
loc = geocode(affiliation)
if loc is None:
continue
locations.append({
"year": year,
"affiliation": loc["affiliation"],
"latlng": loc["latlng"],
})
return sorted(locations, key=lambda l: int(l["year"]))
if __name__ == "__main__":
print(len(get_locations("Simpson, R")))
| dfm/careermap | careermap/get_locations.py | get_locations.py | py | 826 | python | en | code | 0 | github-code | 6 |
28509751362
|
# coding: utf-8
"""
Messente API
[Messente](https://messente.com) is a global provider of messaging and user verification services. * Send and receive SMS, Viber, WhatsApp and Telegram messages. * Manage contacts and groups. * Fetch detailed info about phone numbers. * Blacklist phone numbers to make sure you're not sending any unwanted messages. Messente builds [tools](https://messente.com/documentation) to help organizations connect their services to people anywhere in the world. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import inspect
import pprint
import re # noqa: F401
import six
from messente_api.configuration import Configuration
class DeliveryReportResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'statuses': 'list[DeliveryResult]',
'to': 'str',
'omnimessage_id': 'str'
}
attribute_map = {
'statuses': 'statuses',
'to': 'to',
'omnimessage_id': 'omnimessage_id'
}
def __init__(self, statuses=None, to=None, omnimessage_id=None, local_vars_configuration=None): # noqa: E501
"""DeliveryReportResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._statuses = None
self._to = None
self._omnimessage_id = None
self.discriminator = None
self.statuses = statuses
self.to = to
self.omnimessage_id = omnimessage_id
@property
def statuses(self):
"""Gets the statuses of this DeliveryReportResponse. # noqa: E501
Contains the delivery reports for each channel, ordered by send order # noqa: E501
:return: The statuses of this DeliveryReportResponse. # noqa: E501
:rtype: list[DeliveryResult]
"""
return self._statuses
@statuses.setter
def statuses(self, statuses):
"""Sets the statuses of this DeliveryReportResponse.
Contains the delivery reports for each channel, ordered by send order # noqa: E501
:param statuses: The statuses of this DeliveryReportResponse. # noqa: E501
:type statuses: list[DeliveryResult]
"""
if self.local_vars_configuration.client_side_validation and statuses is None: # noqa: E501
raise ValueError("Invalid value for `statuses`, must not be `None`") # noqa: E501
self._statuses = statuses
@property
def to(self):
"""Gets the to of this DeliveryReportResponse. # noqa: E501
Phone number in e.164 format # noqa: E501
:return: The to of this DeliveryReportResponse. # noqa: E501
:rtype: str
"""
return self._to
@to.setter
def to(self, to):
"""Sets the to of this DeliveryReportResponse.
Phone number in e.164 format # noqa: E501
:param to: The to of this DeliveryReportResponse. # noqa: E501
:type to: str
"""
if self.local_vars_configuration.client_side_validation and to is None: # noqa: E501
raise ValueError("Invalid value for `to`, must not be `None`") # noqa: E501
self._to = to
@property
def omnimessage_id(self):
"""Gets the omnimessage_id of this DeliveryReportResponse. # noqa: E501
Unique identifier for the omnimessage # noqa: E501
:return: The omnimessage_id of this DeliveryReportResponse. # noqa: E501
:rtype: str
"""
return self._omnimessage_id
@omnimessage_id.setter
def omnimessage_id(self, omnimessage_id):
"""Sets the omnimessage_id of this DeliveryReportResponse.
Unique identifier for the omnimessage # noqa: E501
:param omnimessage_id: The omnimessage_id of this DeliveryReportResponse. # noqa: E501
:type omnimessage_id: str
"""
if self.local_vars_configuration.client_side_validation and omnimessage_id is None: # noqa: E501
raise ValueError("Invalid value for `omnimessage_id`, must not be `None`") # noqa: E501
self._omnimessage_id = omnimessage_id
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = inspect.getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeliveryReportResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, DeliveryReportResponse):
return True
return self.to_dict() != other.to_dict()
| messente/messente-api-python | messente_api/models/delivery_report_response.py | delivery_report_response.py | py | 6,393 | python | en | code | 0 | github-code | 6 |
6907369998
|
from flask import Flask, request, redirect
from twilio.twiml.messaging_response import MessagingResponse
from firebase import firebase
# from flask_cors import CORS
from twilio.rest import Client
import pyrebase
config = {
"apiKey": "AIzaSyAEEO1frXfzyL6MCkRvgGz7qURfsTLajRc",
"authDomain" : "covid-19-fake-news-detector.firebaseapp.com",
"databaseURL" : "https://covid-19-fake-news-detector.firebaseio.com",
"projectId" : "covid-19-fake-news-detector",
"storageBucket" : "covid-19-fake-news-detector.appspot.com",
"messagingSenderId" : "401417810179",
"appId" : "1:401417810179:web:b5c7dac2f172bfdc11f936",
"measurementId" : "G-59YT063WPN"
}
fb = pyrebase.initialize_app(config)
db = fb.database()
app = Flask(__name__)
app.config.from_object(__name__)
firebase = firebase.FirebaseApplication("https://covid-19-fake-news-detector.firebaseio.com/", None)
@app.route("/status", methods=['POST'])
def sms_status(key):
update = firebase.get('/Incoming/'+key['name'],'status')
from_whatsapp_no = 'whatsapp:+14155238886'
to_whatsapp_no = 'whatsapp:+9189********'
account = "ACa0b9328e73aae3240844*******"
token = "cdd6da1ea1baf8050d20005d*******"
client = Client(account,token)
return str(client.messages.create(body= update, from_ =from_whatsapp_no, to = to_whatsapp_no))
@app.route("/sms", methods=['POST'])
def sms_reply():
# Fetch the message
usrid = request.form.get('From')
print(usrid)
msg = request.form.get('Body')
#json format for firebase
data = {
"userid": usrid,
"news": msg,
"status": "Wait, we are processing your request"
}
print("coming")
#Create db
key = firebase.post('/Incoming', data)
print(key['name'])
#read db
update = firebase.get('/Incoming/'+key['name'],'status')
print(update)
# Create reply
resp = MessagingResponse()
resp.message(update)
return str(resp)
# else:
# default = "Wait, we are processing your request"
# return (default)
if __name__ == "__main__":
app.run(debug=True)
| mayankchauhan96/Fake-news-detector | app.py | app.py | py | 2,112 | python | en | code | 1 | github-code | 6 |
31698964136
|
import pytorch_lightning as pl
import torch
from src.training_class import CFG, BertModule
if __name__ == "__main__":
torch.cuda.empty_cache()
model = BertModule()
trainer = pl.Trainer(
accelerator="gpu",
devices=1,
max_epochs=CFG.epochs,
precision=32,
gradient_clip_val=1e-1,
fast_dev_run=False,
profiler=None,
accumulate_grad_batches=4,
callbacks=None,
)
trainer.fit(model)
trainer.validate(model)
| ArturYasnov/Quora-Insincere-Questions-using-BERT | train.py | train.py | py | 500 | python | en | code | 0 | github-code | 6 |
8266532966
|
import logging
import shutil
import sys
import click
from cekit.cache.artifact import ArtifactCache
from cekit.config import Config
from cekit.crypto import SUPPORTED_HASH_ALGORITHMS
from cekit.descriptor.resource import create_resource
from cekit.log import setup_logging
from cekit.tools import Map
from cekit.version import __version__
setup_logging()
LOGGER = logging.getLogger("cekit")
CONFIG = Config()
@click.group(context_settings=dict(max_content_width=100))
@click.option("-v", "--verbose", help="Enable verbose output.", is_flag=True)
@click.option(
"--config",
metavar="PATH",
help="Path to configuration file.",
default="~/.cekit/config",
show_default=True,
)
@click.option(
"--work-dir",
metavar="PATH",
help="Location of the working directory.",
default="~/.cekit",
show_default=True,
)
@click.version_option(message="%(version)s", version=__version__)
def cli(config, verbose, work_dir):
pass
@cli.command(name="ls", short_help="List cached artifacts")
def ls():
CacheCli.prepare().ls()
@cli.command(name="add", short_help="Add artifact to cache")
@click.argument("location", metavar="LOCATION")
@click.option("--md5", metavar="CHECKSUM", help="The md5 checksum of the artifact.")
@click.option("--sha1", metavar="CHECKSUM", help="The sha1 checksum of the artifact.")
@click.option(
"--sha256", metavar="CHECKSUM", help="The sha256 checksum of the artifact."
)
@click.option(
"--sha512", metavar="CHECKSUM", help="The sha512 checksum of the artifact."
)
def add(location, md5, sha1, sha256, sha512):
if not (md5 or sha1 or sha256 or sha512):
raise click.UsageError("At least one checksum must be provided")
CacheCli.prepare().add(location, md5, sha1, sha256, sha512)
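# Illustrative invocation (sketch, assuming this CLI is exposed as the `cekit-cache`
# entry point; the URL and checksum are placeholders):
#   cekit-cache add https://example.com/artifact.jar --sha256 <checksum>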
@cli.command(name="rm", short_help="Remove artifact from cache")
@click.argument("uuid", metavar="UUID")
def rm(uuid):
CacheCli.prepare().rm(uuid)
@cli.command(name="clear", short_help="Remove all artifacts from the cache")
def clear():
CacheCli.prepare().clear()
class CacheCli:
@staticmethod
def prepare():
"""Returns an initialized object of CacheCli"""
return CacheCli(Map(click.get_current_context().parent.params))
def __init__(self, args):
# TODO: logging is used only when adding the artifact, we need to find out if it would be possible to do it better
if args.verbose:
LOGGER.setLevel(logging.DEBUG)
else:
LOGGER.setLevel(logging.INFO)
CONFIG.configure(args.config, {"work_dir": args.work_dir})
def add(self, location, md5, sha1, sha256, sha512):
artifact_cache = ArtifactCache()
resource = {}
resource["url"] = location
if md5:
resource["md5"] = md5
if sha1:
resource["sha1"] = sha1
if sha256:
resource["sha256"] = sha256
if sha512:
resource["sha512"] = sha512
artifact = create_resource(resource)
cached = artifact_cache.cached(artifact)
if cached:
click.echo("Artifact {} is already cached!".format(location))
sys.exit(0)
try:
artifact_id = artifact_cache.add(artifact)
click.echo(
"Artifact {} cached with UUID '{}'".format(location, artifact_id)
)
except Exception as ex:
click.secho(
"Cannot cache artifact {}: {}".format(location, str(ex)), fg="red"
)
sys.exit(1)
def ls(self):
artifact_cache = ArtifactCache()
artifacts = artifact_cache.list()
if artifacts:
for artifact_filename, artifact in artifacts.items():
click.echo(
"\n{}:".format(
click.style(
artifact_filename.split(".")[0], fg="green", bold=True
)
)
)
for alg in SUPPORTED_HASH_ALGORITHMS:
if alg in artifact and artifact[alg]:
click.echo(
" {}: {}".format(
click.style(alg, bold=True), artifact[alg]
)
)
if artifact["names"]:
click.echo(" {}:".format(click.style("names", bold=True)))
for name in artifact["names"]:
click.echo(" - %s" % name)
else:
click.echo("No artifacts cached!")
def rm(self, uuid: str):
artifact_cache = ArtifactCache()
try:
artifact_cache.delete(uuid)
click.echo("Artifact with UUID '{}' removed".format(uuid))
except Exception:
click.secho(
"Artifact with UUID '{}' doesn't exists in the cache".format(uuid),
fg="yellow",
)
sys.exit(1)
def clear(self):
"""
Removes the artifact cache directory with all artifacts.
Use with caution!
"""
artifact_cache = ArtifactCache()
if not click.confirm(
"Are you sure to remove all artifacts from cache?", show_default=True
):
return
try:
shutil.rmtree(artifact_cache.cache_dir)
click.echo("Artifact cache cleared!")
except Exception:
click.secho(
"An error occurred while removing the artifact cache directory '{}'".format(
artifact_cache.cache_dir
),
fg="red",
)
sys.exit(1)
if __name__ == "__main__":
cli()
| cekit/cekit | cekit/cache/cli.py | cli.py | py | 5,730 | python | en | code | 70 | github-code | 6 |
| 71889450427 |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.views.generic import CreateView
from levangersundet.forms import DeltagerForm
from post_office import mail
class TestCreateView(CreateView):
form_class = DeltagerForm
template_name = 'test.html'
def get_success_url(self):
return '/nb/%s/' % self.testtype
def form_valid(self, form):
response = super(TestCreateView, self).form_valid(form)
mail.send(
[self.object.epost],
settings.SERVER_EMAIL,
template=self.testtype,
context={'deltager': self.object},
priority='now'
)
mail.send(
[settings.TEST_NOTIFY_EMAIL],
settings.SERVER_EMAIL,
template='%s_notify' % self.testtype,
context={'deltager': self.object},
priority='now'
)
return response
def dispatch(self, *args, **kwargs):
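# Grab the test type from the URL kwargs before normal CreateView handling;
# it selects both the post_office email template and the success redirect.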
self.testtype = kwargs.get('testtype', False)
return super(TestCreateView, self).dispatch(*args, **kwargs)
| fivethreeo/jsdev | mainapp/views.py | views.py | py | 1,070 | python | en | code | 0 | github-code | 6 |
| 71477031868 |
import sys
input = sys.stdin.readline
data1 = input().rstrip()
data2 = input().rstrip()
n1 = len(data1)
n2 = len(data2)
ans = 0
i = 0
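# Greedy scan: count non-overlapping occurrences of data2 in data1 by jumping
# len(data2) characters after every match, otherwise advancing by one.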
while i <= n1-n2:
if data1[i:i+n2] == data2:
ans += 1
i += n2
else:
i += 1
print(ans)
| YOONJAHYUN/Python | BOJ/1543.py | 1543.py | py | 259 | python | en | code | 2 | github-code | 6 |
| 49771831 |
from typing import *
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
class Solution:
def levelOrder(self, root: 'Node') -> List[List[int]]:
res = []
if root is None:
return res
que = [root]
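# BFS: each outer iteration drains exactly one level (q_size nodes),
# collecting its values before the children are processed.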
while len(que) > 0:
cur_res = []
q_size = len(que)
for _ in range(q_size):
node = que.pop(0)
if node.children is not None:
for child in node.children:
que.append(child)
cur_res.append(node.val)
res.append(cur_res)
return res
if __name__ == "__main__":
s = Solution()
node5 = Node(5)
node6 = Node(6)
node3 = Node(3, [node5, node6])
node2 = Node(2)
node4 = Node(4)
node1 = Node(1, [node3, node2, node4])
assert s.levelOrder(node1) == [[1], [3, 2, 4], [5, 6]]
| code-cp/leetcode | solutions/429/main.py | main.py | py | 991 | python | en | code | 0 | github-code | 6 |
| 41563667577 |
from src.models.caja_moneda import CajaMoneda
from src.models.moneda_digital import MonedaDigital
from src.models.tipo_operacion import TipoOperacion
from src.models.transaccion import Transaccion
class Usuario(object):
def __init__(self, nombre, codigo):
self._nombre = nombre
self._codigo = codigo
self._caja_monedas = []
self._historial_transacciones = []
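# Every new user starts with 100 units of each digital currency.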
self._caja_monedas = self.iniciar_lista_monedas(100)
def iniciar_lista_monedas(self, cantidad_monedas_inicial):
cajas = []
for moneda in MonedaDigital:
caja = CajaMoneda(moneda)
caja.sumar_cantidad_monedas(cantidad_monedas_inicial)
cajas.append(caja)
return cajas
def agregar_transaccion(self, transaccion):
self._historial_transacciones.append(transaccion)
def restar_monedas(self, moneda_digital, cantidad):
position = self.buscar_pocicion(moneda_digital)
accion_realizada = False
if position is not None:
if cantidad <= self._caja_monedas[position].cantidad:
self._caja_monedas[position].restar_cantidad_monedas(cantidad)
accion_realizada = True
return accion_realizada
def sumar_monedas(self, moneda_digital, cantidad):
posicion = self.buscar_pocicion(moneda_digital)
accion_realizada = False
if posicion is not None:
self._caja_monedas[posicion].sumar_cantidad_monedas(cantidad)
accion_realizada = True
return accion_realizada
# Searches the caja_monedas list:
# if the monedaDigital is found, returns its position;
# if no monedaDigital is found, returns None.
def buscar_pocicion(self, moneda_digital: MonedaDigital):
position = None
for caja in self._caja_monedas:
if caja.moneda is moneda_digital.value:
position = self._caja_monedas.index(caja)
return position
def __str__(self):
mensaje = "Usuario \n nombre: " + self._nombre + "\n" + "Cajas:\n"
for caja in self._caja_monedas:
mensaje += "* " + caja.__str__()
mensaje += "Transacciones\n"
if len(self._historial_transacciones) == 0:
mensaje += "No hay Transacciones monetarias"
else:
for transaccion in self._historial_transacciones:
mensaje += "* " + transaccion.__str__()
return mensaje
@property
def caja_monedas(self):
return self._caja_monedas
@property
def nombre(self):
return self._nombre
@property
def historial_transacciones(self):
if len(self._historial_transacciones) == 0:
return "Sin transacciones"
else:
transacciones = "Transacciones de " + self.nombre + "\n"
for transaccion in self._historial_transacciones:
transacciones += transaccion.__str__()
return transacciones
@property
def codigo(self):
return self._codigo
| Andres-Fernandez-Caballero/monedero_virtual | src/models/Usuario.py | Usuario.py | py | 3,049 | python | es | code | 0 | github-code | 6 |
| 25549591929 |
import logging
import os
import sys
def configLogger():
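# The root logger captures everything (DEBUG); the file handler then
# filters to INFO+ and writes to /tmp/logs/<executable>.log.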
root = logging.getLogger()
root.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(_get_logfile_name())
file_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
root.addHandler(file_handler)
def _get_logfile_name():
executable_name = os.path.basename(sys.argv[0]).split('.')[0]
return '/tmp/logs/%s.log' % executable_name
| cipriantruica/news_diffusion | news-spreading-master/logger/logger.py | logger.py | py | 529 | python | en | code | 0 | github-code | 6 |
| 1918587531 |
import sys
import datetime
import csv
from os import path
""""
This section describes all functions called when the program is started.
"""
def startup():
create_date_file()
create_bought_file()
create_sold_file()
def create_date_file():
"""Check if there is already a file present containing the date set as current date. If not: create one."""
if path.exists('date.txt') == False:
date = str(datetime.date.today())
file = open('date.txt', 'w')
file.write(date)
file.close()
def create_bought_file():
"""Check if there is already a bought.csv file present. If not: create one"""
if path.exists('bought.csv') == False:
with open('bought.csv', 'w', newline='') as csvfile:
bought_creation = csv.writer(csvfile)
bought_creation.writerow(['id', 'product_name', 'buy_date', 'buy_price', 'expiration_date'])
def create_sold_file():
"""Check if there is already a sold.csv file present. If not: create one"""
if path.exists('sold.csv') == False:
with open('sold.csv', 'w', newline='') as csvfile:
sold_creation = csv.writer(csvfile)
sold_creation.writerow(['id', 'bought_id', 'sell_date', 'sell_price'])
| YorrickvB/SuperpyV2 | startup.py | startup.py | py | 1,282 | python | en | code | 0 | github-code | 6 |
| 44091065220 |
import os
import gc
import re
import sys
import copy
import time
import random
import tempfile
import logging
import cPickle as cp
import multiprocessing
import subprocess
import deepity
import numpy as np
import numpy.random as npr
import smat as sm
import scipy
import scipy.stats
from . import std
from . import hpsearch as hp
from . import _io_
from . import util
from .data import datasource
from . import globals
from .report import training_report, calc_auc, bootstrap_auc
import node as _node
import trainer as _trainer
class _object_factory_from_file(object):
def __init__(self,filename,fieldname=None):
self.filename = filename
self.fieldname = fieldname
def __call__(self):
obj = _io_.load(self.filename)
if self.fieldname and isinstance(obj,dict):
return obj[self.fieldname]
return obj
def _create_model(model_proto, hparams):
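# Hyperparameters named "model:<node.path>.<attr>" are applied to a deep copy of
# the prototype, preferring a set_<attr>() setter on the node when one exists.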
model = copy.deepcopy(model_proto)
for key,val in hparams.iteritems():
prefix,path = key.split(":") # look for hparams named "model:..."
if prefix == "model":
nodepath,attrname = path.rsplit(".",1)
node = model.find(nodepath)
if hasattr(node,"set_"+attrname):
getattr(node,"set_"+attrname)(model,val) # call model.set_xxx(val)
else:
setattr(node,attrname,val)
return model
def _create_trainer(trainer_proto, hparams):
trainer = copy.deepcopy(trainer_proto)
for key,val in hparams.iteritems():
prefix,attrname = key.split(":") # look for hparams named "trainer:..."
if prefix == "trainer":
if hasattr(trainer,"set_"+attrname):
getattr(trainer,"set_"+attrname)(model,val) # call trainer.set_xxx(val)
else:
setattr(trainer,attrname,val)
return trainer
def _slice_hparams(hparams, inst):
h = copy.deepcopy(hparams)
for key in h.keys():
h[key] = h[key][inst]
return h
def load_hparams_result(filename):
with open(filename,'r') as f:
lines = f.readlines()
params = {}
result = 0.0
for line in lines:
# Look for validation performance
matches = re.findall("# metric = (\S+)", line)
if len(matches) > 0:
result = float(matches[0])
continue
# Add hparam
name, value = re.findall(" *(\S+) += (\S+)", line)[0]
if name in [":cfgname"]:
params[name] = value
else:
params[name] = float(value)
return hp.sample(params, result)
def save_hparams_result(filename, hparams_result, metric_key):
util.makepath(os.path.dirname(filename))
with open(filename,'w') as f:
if metric_key:
f.write("# metric = %f (%s)\n" % (hparams_result.metrics[metric_key], metric_key))
f.write(hparams2str(hparams_result.params))
def _save_model_inst(filename, inst, model, hparams):
m = copy.deepcopy(model)
sm.sync()
# Slice the trainable weights
m.slice_inst(inst)
# Also slice the hyperparams, and replace corresponding 'arrayed'
# attributes in the model with their scalar (sliced element) counterpart.
h = _slice_hparams(hparams,inst)
for key,val in h.iteritems():
prefix,path = key.split(":") # look for hparams named "model:..."
if prefix != "model":
continue
nodepath,attrname = path.rsplit(".",1)
node = m.find(nodepath)
if hasattr(node,"set_"+attrname):
getattr(node,"set_"+attrname)(model,val) # call model.set_xxx(val)
else:
setattr(node,attrname,val)
# Dump the model
util.makepath(os.path.dirname(filename))
with open(filename,'wb') as f:
cp.dump(m,f)
sm.sync() # Make sure we wait until the sarrays are all dumped
def gen_predictions(model, data):
# We must feed each sequence through the model several times
# by applying the model repeatedly on sliding a window along the sequence.
# That generates a prediction map, from which we can take max, sum, etc.
predictions = []
gmaps = {}
batches = data.asbatches(batchsize=128, reshuffle=False)
for batch in batches:
args = batch.input_data()
args["want_bprop_inputs"] = False
if isinstance(model.Z.origin().node,std.softmaxnode):
args["bprop_inputs_loss"] = std.nll()
else:
args["bprop_inputs_loss"] = std.mse()
outputs = model.eval(**args)
Z = outputs['Z'].asnumpy()
Zmask = outputs.get('Zmask',None)
if Zmask is not None:
Zmask = Zmask.asnumpy()
Z = Z[Zmask.ravel()]
predictions.append(Z)
# Concatenate all numpy arrays if they're the same size
predictions = np.vstack(predictions)
return predictions
def getinstdir(outdir, targetname, trialnum, foldid):
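# outdir may be a plain string or a list of path parts, where a part can be a
# (name, pattern) tuple filled in from the target/trial/fold arguments below.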
if isinstance(outdir,str):
return outdir
outdir = [_ for _ in outdir] # Make a copy that we can substitute elements in
args = {"target" : targetname,
"trial" : trialnum,
"fold" : foldid}
for i,item in enumerate(outdir):
if isinstance(item, tuple):
name, patt = item
if args[name] is None:
outdir[i] = None
else:
outdir[i] = patt % args[name]
instdir = "/".join([part for part in outdir if part is not None])
return instdir
def load_metrics(filename):
metrics = {}
with open(filename) as f:
groupnames = f.readline().rstrip().split()
for line in f:
line = line.rstrip().split()
for i,val in enumerate(line[1:]):
metrics.setdefault(groupnames[i],{})[line[0]] = val
return metrics
def save_metrics(outfile, metrics):
with open(outfile,"w") as f:
groupnames = sorted(metrics.keys())
fieldnames = set()
for groupname in groupnames:
for fieldname in metrics[groupname].keys():
fieldnames.add(fieldname)
fieldnames = sorted(list(fieldnames))
f.write(" "*14+"\t".join(groupnames) + "\n")
rows = {}
for groupname in groupnames:
for fieldname in fieldnames:
fieldval = metrics[groupname].setdefault(fieldname, np.nan)
if not isinstance(fieldval,np.ndarray):
if isinstance(fieldval, float):
fmt = "%.2e" if fieldname.endswith(".p") else "%.6f"
fieldval = fmt%fieldval
rows.setdefault(fieldname,[]).append(str(fieldval))
f.writelines([fieldname + " "*max(0,14-len(fieldname)) + "\t".join(rows[fieldname]) +"\n" for fieldname in fieldnames])
def call_dumpviz(dumpdir):
subprocess.Popen(["python", os.path.dirname(__file__)+"/dumpviz.py", dumpdir])
##########################################
class hypertrain_worker(object):
"""
Given a dataset and specific hyperparameters, this object will
simply train a model (an array of models) and return the
validation error (array of validation errors).
"""
def __init__(self, worker_id, model_proto, trainer_proto, datasrc,
nfold, allfolds, outdir, report_class, devices, verbose,
default_dtype, global_flags, auxfilter, mode, dumpviz):
self.worker_id = worker_id
# All the data subsets in 'trainset' will be merged into a single fold.
self.model_proto = model_proto
self.trainer_proto = trainer_proto
self.datasrc = datasrc # Load a copy of the dataset into this worker process.
self.nfold = nfold
self.allfolds = allfolds
self.outdir = outdir
self.mode = mode
self.aucrange = (0.5,0.5) # threshold for making AUCs out of non-binary targets, presumed to be in range [0,1]
self.report_class = report_class
self.auxfilter = auxfilter
self.dumpviz = dumpviz
globals.flags.copy_from(global_flags)
# If we've been called from a new process, create a separate log file.
# Otherwise everything is logged into the original log file.
if multiprocessing.current_process().name != "MainProcess":
logdir = getinstdir(outdir,None,None,None)
worker_logfile = os.path.join(logdir,"hypertrain_worker%d.log" % worker_id)
globals.set_logging(worker_logfile,level=verbose,echo=False)
logging.info("\n----------------------------- %s -----------------------------" % time.strftime("%y-%m-%d %H-%M-%S",time.localtime()))
# Configure deepity to use this worker's GPU device.
logging.info("worker %d starting on device %d using %s" % (worker_id,devices[worker_id],sm.get_default_dtype().__name__))
rseed = int((time.time()*100000 + worker_id) % 2000)
globals.reset_backend(device=devices[worker_id], seed=rseed)
random.seed(rseed)
sm.set_default_dtype(default_dtype)
npr.seed(rseed)
# Seed this process's random number generator, for reproducibility
sm.sync()
# Prepare the datasource to serve data.
self.datasrc.open()
def __del__(self):
self.datasrc.close()
self.datasrc = None
gc.collect() # Clear out the cruft and make sure the backend can be destroyed
sm.sync()
sm.destroy_backend()
def __call__(self, hparams, task_ids, sample_ids):
# Determine what kind of targets we want to train on
data = self.datasrc.astargets(task_ids) # Copies of arbitrary targets
data = data[:] # Copy so that when we normalize etc we don't affect the original data
# Normalize the targets. For logisitic-output models this means
# scaling targets to [0,1]. For other models this means scaling
# targets to have mean=0, variance=1.
data.requirements = self.model_proto.data_requirements()
#print np.percentile(data.Y[data.Ymask].ravel(), [99, 99.99, 99.995, 99.999])
#print data.Y.size, int(data.Y.size*(100-99.95)/100)
if "clamp_targets" in globals.flags:
data.clamp_extremes(0.0,99.95)
if "normalize_targets" in globals.flags:
data.normalize_targets()
#data.arcsinhtransform_targets()
if self.mode != 'calib':
# If we're not in calibration mode, then there's no need for multiple checkpoints
# -- just keep the last checkpoint so that it can be dumped to disk
#del hparams["trainer:checkpoints"]
self.trainer_proto.checkpoints = 1
# Shuffle the individual rows of data, always the same random shuffle
# and therefore always the same random split each time the code is run.
data.shuffle()
# Create a callback handler to collect predictions and evaluate final performance
checkpoints = self.report_class()
# Perform k-fold cross validation (k=nfold), training with one fold held out at a time.
for foldid in range(self.nfold):
checkpoints.setfold(foldid) # Tell the checkpoint
# Create a new model and trainer with the given hyperparams
model = _create_model(self.model_proto, hparams)
trainer = _create_trainer(self.trainer_proto, hparams)
# Split the data into training and validation sets
trdata, vadata = data.split(foldid, self.nfold-1)
trdata = trdata.augmented(trdata)
datasets = { "train" : trdata }
if vadata:
vadata = vadata.augmented(vadata)
datasets["validate"] = vadata
if self.auxfilter:
datasets["validate_aux"] = vadata[[i for i in range(len(vadata)) if vadata.foldids[i] in self.auxfilter]]
for dset in datasets.values():
dset.requirements = model.data_requirements()
#if not checkpoint_callback:
# trainer.viz_steps = False # Disable periodic updates if no reports
# Train the model and remember how well it performed.
trainer.train(model, datasets, checkpoints)
if self.mode == 'train' and self.nfold > 1:
entries = checkpoints.curr()
metrics = self.calc_metrics(entries)
self.save_model(model, hparams, task_ids, sample_ids, foldid)
self.save_metrics(metrics, task_ids, sample_ids, foldid)
self.save_predictions(entries, task_ids, sample_ids, foldid)
self.call_dumpviz(task_ids, sample_ids, foldid)
# If we're only supposed to try one fold, then don't bother looping over the other splits
if not self.allfolds:
break
# Consolidate the separate folds, and dump them if need be
entries = checkpoints.combined()
# Calculate the performance stats associated with each target
metrics = self.calc_metrics(entries)
# Save the current model and predictions
if self.mode == 'train':
self.save_predictions(entries, task_ids, sample_ids, None)
self.save_metrics(metrics, task_ids, sample_ids, None)
if self.nfold == 1:
self.save_model(model, hparams, task_ids, sample_ids, None)
self.save_preprocessors(data, task_ids, sample_ids, None)
#self.call_dumpviz(task_ids, sample_ids, None)
# Return a new hparams object with the performance incorporated
hpsearch_result = self.add_hparam_metrics(hparams, metrics)
return hpsearch_result
def save_model(self, model, hparams, task_ids, sample_ids, foldid):
for i, taskid in enumerate(task_ids):
dumpdir = getinstdir(self.outdir, taskid, sample_ids[i], foldid)
util.makepath(dumpdir)
# Slice out model i and save it to disk
_save_model_inst(dumpdir+"/model.pkl", i, model, hparams)
def save_predictions(self, entries, task_ids, sample_ids, foldid):
for i, taskid in enumerate(task_ids):
dumpdir = getinstdir(self.outdir, taskid, sample_ids[i], foldid)
util.makepath(dumpdir)
# Save out the predictions for model i
assert len(entries[i]) == 1, "Bug. Expected only a single unique 'step' in the list of entries"
groups = entries[i].values()[0]
np.savez_compressed(dumpdir+"/predict.npz",
targetname=np.asarray(taskid, dtype=object),
groups=np.asarray(groups, dtype=object))
def save_metrics(self, metrics, task_ids, sample_ids, foldid):
for i, taskid in enumerate(task_ids):
dumpdir = getinstdir(self.outdir, taskid, sample_ids[i], foldid)
util.makepath(dumpdir)
# Save out the predictions for model i
assert len(metrics[i]) == 1, "Bug. Expected only a single unique 'step' in the list of entries"
groups = metrics[i].values()[0]
save_metrics(dumpdir+"/metrics.txt", groups)
def call_dumpviz(self, task_ids, sample_ids, foldid):
if not self.dumpviz:
return
for i, taskid in enumerate(task_ids):
dumpdir = getinstdir(self.outdir, taskid, sample_ids[i], foldid)
call_dumpviz(dumpdir)
def save_preprocessors(self, data, task_ids, sample_ids, foldid):
for i, taskid in enumerate(task_ids):
dumpdir = getinstdir(self.outdir, taskid, sample_ids[i], foldid)
data.dump_preprocessors(dumpdir, slice(i,i+1))
def add_hparam_metrics(self, hparams, metrics):
groupkey = "validate" if "validate" in metrics[0].values()[0] else "train"
hpsearch_result = {}
for i in metrics:
for step in metrics[i]:
hparams_i = { key : val[i] for key,val in hparams.iteritems() }
hparams_i["trainer:max_steps"] = step
metrics_i = metrics[i][step][groupkey]
hpsearch_result.setdefault(i,[]).append((hparams_i, metrics_i)) # Thus tuple is returned to hpsearch
return hpsearch_result
"""
if "vloss" in stats and stats["vloss"] is not None:
loss.append(stats["vloss"])
auc.append(stats["vauc"])
else:
loss.append(stats["tloss"])
auc.append(stats["tauc"])
if self.testfilter is not None:
tidx = [i for i in range(len(vdata)) if vdata.foldids[i] in self.testfilter]
tdata = vdata[tidx]
tpred = gen_predictions(model, tdata)
testauc,teststd = bootstrap_auc(tpred.ravel(), tdata.Y.ravel(), ntrial=20)
flogfile = self.outdir + "/%s_%04d/fold%d.log" % (task_ids[0], sample_ids[0], foldid)
with open(flogfile) as fh:
flog = fh.readlines()
flog[-1] = flog[-1].rstrip() + "\ttestAUC=%.3f (%f)\n" % (testauc,teststd)
with open(flogfile,"w") as fh:
fh.writelines(flog)
testaucs.append((testauc, teststd))
if report:
reports.append(report)
report.dump(want_html=True)
#report.dump(want_html=self.want_html)
# Dump each model to a separate file
for inst in range(len(sample_ids)):
filename = self.outdir + ("/%s_%04d/fold%d.model.pkl" % (task_ids[inst], sample_ids[inst], foldid))
_save_model_inst(filename, inst, model, hparams)
"""
#break
""""
if reports != []:
# Dump the separate (individual) hyperparams that were used for each instance trained
for inst in range(len(sample_ids)):
dumpdir = self.outdir + ("/%s_%04d/" % (task_ids[inst], sample_ids[inst]))
vloss = self.validation_performance[task_ids[inst]] if self.validation_performance else None
_dump_hparams(dumpdir, _slice_hparams(hparams,inst), vloss)
tdata.dump_preprocessors(dumpdir, slice(inst,inst+1))
merged = self.report_class.merge_reports(self.outdir + "/%(task_id)s_%(sample_id)04d/final.log", task_ids, sample_ids, reports)
#merged.dump(want_html=self.want_html)
merged.dump()
if testaucs:
flogfile = self.outdir + "/%s_%04d/final.log" % (task_ids[0], sample_ids[0])
with open(flogfile) as fh:
flog = fh.readlines()
testauc = sum([_auc for _auc, _std in testaucs]) / len(testaucs)
teststd = sum([_std for _auc, _std in testaucs]) / len(testaucs)
flog[-1] = flog[-1].rstrip() + "\ttestAUC=%.3f (%f)\n" % (testauc,teststd)
with open(flogfile,"w") as fh:
fh.writelines(flog)
# Average the loss over each fold
loss = np.mean(np.asarray(loss),axis=0)
auc = np.mean(np.asarray(auc),axis=0)
# Dump each average loss and corresponding hyperparameters into a log file
for inst in range(len(sample_ids)):
util.makepath(self.outdir+"/hpsearch")
with open(self.outdir+"/hpsearch/%s.log"%task_ids[inst],"a") as f:
f.write("%.6f\t%.4f\t%s\n"%(loss[inst], auc[inst], hparams2str( _slice_hparams(hparams,inst) ).replace("\n",";")) )
"""
sm.sync()
# Return a list of objective values, one per search_id
values = [float(x) for x in loss]
return values
def calc_metrics(self, entries):
metrics = {}
for taskidx in entries:
for step in entries[taskidx]:
for group in entries[taskidx][step]:
entry = entries[taskidx][step][group]
Z = entry["Z"]
Y = entry["Y"]
# Start computing stats
metric = metrics.setdefault(taskidx,{}).setdefault(step,{}).setdefault(group,{})
metric["loss"] = entry["L"]
if Z.shape[1] == 1:
metric.update(deepity.calc_metrics(Z.ravel(), Y.ravel(), self.aucrange))
return metrics
def hparams2str(params):
txt = ""
for key in sorted(params.keys()):
value = params[key]
if isinstance(value, np.ndarray) and value.size > 10:
value = "ndarray"
txt += " %s = %s\n" % (key + " "*max(0,20-len(key)),value)
return txt
#######################################
def hypertrain(model, trainer, data,
nfold=2, allfolds=True, outdir=None, nsample=20,
devices=None, verbose=None, report_class=None,
auxfilter=None):
if report_class is None: report_class = training_report
# Create the output directory if it doesn't already exist.
if outdir is None:
outdir = os.path.join(tempfile.gettempdir(),"hypertrain")
# Define the search space
space = _get_hypertrain_searchspace(model, trainer)
# Perform the search, returning the best parameters in the search space.
logging.info("calibrating...")
samples = hp.search(space,
objective = hypertrain_worker,
objective_initargs = (model,trainer,data,nfold,allfolds,outdir,report_class,devices,False,sm.get_default_dtype(),globals.flags,auxfilter,"calib",False),
task_ids = data.targetnames,
nsample = nsample,
nprocess = len(devices),
nsample_per_process = 15,
print_progress = True)
logging.info("...calibrating done")
return samples
###########################################
def train(model, trainer, data, hparams=None, hparams_metric=None,
nfold=1, outdir=None, nsample=1,
devices=None, verbose=None, report_class=None,
auxfilter=None, dumpviz=True):
if report_class is None: report_class = training_report
if hparams:
for targetname in data.targetnames:
for sample in range(nsample):
for fold in range(nfold):
save_hparams_result(getinstdir(outdir, targetname, sample, fold)+"/calib.txt", hparams[targetname], hparams_metric)
space = _get_fixed_searchspace(model, trainer, data.targetnames, hparams)
#space = _get_hypertrain_searchspace(model, trainer)
#if space and not hparams:
# raise ValueError("The given model has undetermined hyperparamters. Must call hypertrain first.")
# Replace the randomly sampled hparams with fixed values specified by 'hparams'
#for pname in space._pdefs.iterkeys():
# pbest = np.asarray([hparams[task_id].params[pname] for task_id in data.targetnames])
# space._pdefs[pname] = hp.fixed(pbest, pname)
#print "assigning hparam",pname,"<-",pbest
final_outdir = outdir
logging.info("train...")
hp.search(space,
objective = hypertrain_worker,
objective_initargs = (model,trainer,data,nfold,True,final_outdir,report_class,devices,verbose,sm.get_default_dtype(),globals.flags,auxfilter,"train",dumpviz),
task_ids = data.targetnames,
nsample = nsample,
nsample_per_process = 2,#len(data.targetnames), # Hack: only train numtargets models at a time, to ensure that when nsample>1 the next sample gets a different minibatch order
nprocess = len(devices))
logging.info("...train done")
#######################################################
def _get_fixed_searchspace(model, trainer, targetnames, hparams):
pdefs = []
if hparams:
# Convert the hparams list-of-dictionaries (all dictionaries having same key)
# into a single dictionary-of-lists
hpvec = {}
for targetname in targetnames:
sample = hparams[targetname]
for pkey in sample.params:
hpvec.setdefault(pkey,[]).append(sample.params[pkey])
for key in hpvec:
pdefs.append(hp.fixed(np.array(hpvec[key]), key))
space = hp.space(pdefs)
return space
def _get_hypertrain_searchspace(model, trainer):
# First, collect all hparams by visiting the model's dependency graph
model_hparams = []
def collect_hparam(path,attr):
if isinstance(attr,hp.paramdef):
attr.name = "model:" + path # model:...path
model_hparams.append(attr)
model.visit(collect_hparam)
# Next, ask the trainer for its hyperparams, and put a "trainer." prefix on the name of each one
# so that they don't conflict with model_hparams
trainer_hparams = []
for name,attr in trainer.__dict__.iteritems():
if isinstance(attr,hp.paramdef):
attr.name = "trainer:" + name # trainer:...path
trainer_hparams.append(attr)
# Return a search space built from model and trainer hyperparams
return hp.space(trainer_hparams + model_hparams)
| jisraeli/DeepBind | code/libs/deepity/deepity/hypertrain.py | hypertrain.py | py | 25,376 | python | en | code | 85 | github-code | 6 |
| 12207565885 |
from exercises import *
from graders import *
from training_util import *
from rlbottraining.common_exercises.kickoff_exercise import *
def make_default_playlist():
exercises = [
# KickoffExercise('Both Corners', blue_spawns=[Spawns.CORNER_R, Spawns.CORNER_L], orange_spawns = []),
#KickoffExercise('Right Corner 50/50', blue_spawns=[Spawns.CORNER_R], orange_spawns = [Spawns.CORNER_R]),
KickoffExercise('Right Corner', blue_spawns=[Spawns.CORNER_R], orange_spawns = []),
KickoffExercise('Left Corner', blue_spawns=[Spawns.CORNER_L], orange_spawns = []),
KickoffExercise('Back Right', blue_spawns=[Spawns.BACK_R], orange_spawns = []),
KickoffExercise('Back Left', blue_spawns=[Spawns.BACK_L], orange_spawns = []),
KickoffExercise('Straight', blue_spawns=[Spawns.STRAIGHT], orange_spawns = []),
]
for exercise in exercises:
exercise.match_config = make_match_config_with_cfg()
return exercises
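# This module is meant to be consumed by the rlbottraining runner, which calls
# make_default_playlist() to collect the exercises; the exact invocation
# (CLI or programmatic) depends on the installed rlbottraining version.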
| mattlegro/Teach | training/kickoff_playlist.py | kickoff_playlist.py | py | 976 | python | en | code | 0 | github-code | 6 |
| 16413370006 |
import json
import os
import pygame
from pprint import pformat
from pyggui.core import TRANSPARENT
pygame.font.init()
_DEFAULT_THEME_PATH = 'assets/themes/default_theme.json'
_DEFAULT_THEME_PATH = os.path.join(os.path.dirname(__file__), _DEFAULT_THEME_PATH)
NULL_THEME_DEFAULTS = {
"col": TRANSPARENT,
"width": 0,
"padding": [0, 0],
"radius": 0,
"font": {
"name": "calibri",
"size": 32
}
}
class Theme:
def __init__(self, file=None):
self.file = file
if self.file is None:
self.file = _DEFAULT_THEME_PATH
self.changed = False
self._all_styles = self._load_theme_json(self.file)
self._top_level_theme = self._all_styles.get("*")
self._styles = self._top_level_theme.copy()
@staticmethod
def _load_theme_json(file):
with open(file, mode='r') as theme_file:
themes = json.load(theme_file)
return themes
def get_widget_theme(self, widget=None):
widget_theme = self._top_level_theme.copy()
if widget is None:
return widget_theme
widget_type = widget.__class__.__name__.lower()
# Update the widget theme with the widget type level attributes
widget_type_theme = self._all_styles.get(widget_type, {})
widget_theme |= widget_type_theme
new_theme = Theme()
new_theme._styles = widget_theme.copy()
return new_theme
def __getitem__(self, item):
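# Missing style keys fall back to NULL_THEME_DEFAULTS by (sub)string match,
# e.g. any key containing "col" defaults to TRANSPARENT.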
value = self._styles.get(item, None)
if value is None and any(key in item for key in NULL_THEME_DEFAULTS.keys()):
value = [v for k, v in NULL_THEME_DEFAULTS.items() if k == item or k in item][0]
return value
def __setitem__(self, item, value):
old_styles = self._styles.copy()
if item in self._styles:
self._styles["bg-col"] = value
self.changed = self.changed or old_styles != self._styles
def __repr__(self):
return pformat(self._styles)
@property
def font(self):
font_dict = self['font']
name = font_dict.get('name')
size = font_dict.get('size')
try:
font_ = pygame.font.Font(name, size)
except FileNotFoundError:
font_ = pygame.font.SysFont(name, size)
font_.set_bold(font_dict.get('bold', False))
font_.set_italic(font_dict.get('italic', False))
font_.set_underline(font_dict.get('underline', False))
return font_
def copy(self):
copy = Theme()
copy._styles = self._styles.copy()
return copy
if __name__ == '__main__':
theme = Theme()
print(theme)
| sam57719/PygGUI | pyggui/theme.py | theme.py | py | 2,672 | python | en | code | 0 | github-code | 6 |
| 44284258376 |
import sys, dpkt, socket
from dpkt.compat import compat_ord
class Statistics: #Statistic Class: Used just to store global stats of the following info
connCount = 0
rstCount = 0
openCount = 0
closeCount = 0
duration = 0
minDuration = 0
meanDuration = 0
maxDuration = 0
RTTCount = 0
RTT = []
minRTT = -1
meanRTT = -1
maxRTT = -1
pktCount = 0
minPacket = 0
meanPacket = 0
maxPacket = 0
window = []
minWindow = -1
meanWindow = -1
maxWindow = -1
class Packet(object): #Packet Class: used to store packet info. A packet class is created for each packet, and destroyed after being analyzed
srcMac = ""
dstMac = ""
srcIP = ""
dstIP = ""
IPLen = -1
id = -1
seq = -1
ack = -1
windowSize = -1
flagsBin = -1
flags = []
srcPort = -1
dstPort = -1
time = -1
class Connection: #Connection Class: used to store per-connection state
def __init__(self, packet):
self.srcAdd = packet.srcIP
self.dstAdd = packet.dstIP
self.srcPort = packet.srcPort
self.dstPort = packet.dstPort
self.status = [0, 0, 0] #SYN Count, FIN Count, RST Count
self.startTime = packet.time
self.endTime = packet.time
self.srcDstPacketCount = 0
self.dstSrcPacketCount = 0
self.packetCount = 0
self.srcDstByteCount = 0
self.dstSrcByteCount = 0
self.byteCount = 0
self.initialClientSeq = packet.seq + 1
self.initialServerSeq = 0
self.pastClientSeq = -50
self.pastServerSeq = 0
self.pastClientPacketTime = packet.time
self.pastServerPacketTime = 0
self.RTTCount = 0
self.calRTT = 0
self.duration = 0
self.RTT = []
self.window = []
class Connections:
def __init__(self):
self.links = []
self.size = 0
def add(self, connection):
self.links.append(connection)
self.size = self.size + 1
def printConnections(self):
count = 1
for link in self.links:
print("Connection " + str(count) + ":")
print("Source Address: " + link.srcAdd)
print("Destination Address: " + link.dstAdd)
print("Source Port: " + str(link.srcPort))
print("Destination Port: " + str(link.dstPort))
print("Status: " + "S" + str(link.status[0]) + "F" + str(link.status[1]) + "R" + str(link.status[2]))
if link.status[0] >= 1:
if link.status[1] >= 1:
print("Start Time: " + str(link.startTime) + "ms")
print("End Time: " + str(link.endTime) + "ms")
print("Duration: " + str(link.duration) + "ms")
print("Number of packets send from Source to Destination: " + str(link.srcDstPacketCount))
print("Number of packets send from Destination to Source: " + str(link.dstSrcPacketCount))
print("Total number of packets: " + str(link.packetCount))
print("Number of data bytes send from Source to Destination: " + str(link.srcDstByteCount))
print("Number of data bytes send from Destination to Source: " + str(link.dstSrcByteCount))
print("Total number of data bytes: " + str(link.byteCount))
print("END")
count = count + 1
if count <= (self.size): print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def printPacket(packet):
print("Source MAC: " + packet.srcMac)
print("Destination MAC: " + packet.dstMac)
print("Source IP: " + packet.srcIP)
print("Destination IP: " + packet.dstIP)
print("IP Header Length: " + str(packet.IPLen))
print("Packet ID: " + str(packet.id))
print("Sequence: " + str(packet.seq))
print("Acknowledgement: " + str(packet.ack))
print("Window Size: " + str(packet.windowSize))
print("Flag Binary: " + bin(packet.flagsBin))
print("Flags: " + str(packet.flags))
print("Source Port: " + str(packet.srcPort))
print("Destination Port: " + str(packet.dstPort))
print("Time: " + str(packet.time))
def mac_addr(address): #Refer to Reference #Used to convert binary to mac addresses
return ":".join("%02x" % compat_ord(b) for b in address)
def inet_to_str(inet): #Refer to Reference #Used to convert binary to Ip Addresses
return socket.inet_ntop(socket.AF_INET, inet)
def binToFlags(packet): #Binary Flag parsing
packet.flags = []
if packet.flagsBin & 0b1: packet.flags.append("FIN")
if packet.flagsBin & 0b10: packet.flags.append("SYN")
if packet.flagsBin & 0b100: packet.flags.append("RST")
if packet.flagsBin & 0b1000: packet.flags.append("PSH")
if packet.flagsBin & 0b10000: packet.flags.append("ACK")
if packet.flagsBin & 0b100000: packet.flags.append("URG")
return packet
def clientInitialRTT(stats, connection, packet): #The initial time for RTT
connection.pastClientSeq = packet.seq #Initial sequence number sent
connection.pastClientPacketTime = packet.time #Initial packet time
return 0
def clientFinalRTT(stats, connection, packet): #Client final RTT
if connection.pastClientSeq <= packet.ack: #Ensure that the ack receieved corresponds to an ack
RTT = packet.time - connection.pastClientPacketTime #Calculate RTT time between the matching seq to ack
connection.RTT.append(RTT) #Append RTT calculation to connection for mean and other purposes
return 0
def updateDstSrcCount(connection, packet): #Calculation to update the byte and packet count from the destination to source
connection.packetCount = connection.packetCount + 1
connection.srcDstPacketCount = connection.srcDstPacketCount + 1
connection.dstSrcByteCount = packet.ack - connection.initialServerSeq - 1
connection.byteCount = connection.srcDstByteCount + connection.dstSrcByteCount
return packet.ack - connection.initialServerSeq - 1
def updateSrcDstCount(connection, packet): #Method to update the byte and packet count from the source to destination
connection.packetCount = connection.packetCount + 1
connection.dstSrcPacketCount = connection.dstSrcPacketCount + 1
if connection.initialServerSeq == 0: #Initial server / client 3 way hand shake scenario handling
connection.initialServerSeq = packet.seq + 1
connection.srcDstByteCount = packet.ack - connection.initialClientSeq
connection.byteCount = connection.srcDstByteCount + connection.dstSrcByteCount
return packet.ack - connection.initialClientSeq
def printFinal(stats, connections):
print("A) Total number of connections: " + str(connections.size))
print("___________________________________________________________________________________")
print("")
print("B) Connection's details:")
print("")
connections.printConnections()
print("___________________________________________________________________________________")
print("")
print("C) General:")
print("")
print("Total number of complete TCP connections: " + str(stats.closeCount))
print("Number of reset TCP connections: " + str(stats.rstCount))
print("Number of TCP connections that were still open when the trace capture ended: " + str(stats.openCount))
print("___________________________________________________________________________________")
print("")
print("D) Complete TCP connections:")
print("")
print("Minimum time durations: " + str(stats.minDuration) + "ms")
print("Mean time durations: " + str(stats.meanDuration) + "ms")
print("Maximum time duration: " + str(stats.maxDuration) + "ms")
print("")
print("Minimum RTT values: " + str(stats.minRTT) + "ms")
print("Mean RTT values: " + str(stats.meanRTT) + "ms")
print("Maximum RTT values: " + str(stats.maxRTT) + "ms")
print("")
print("Minimum number of packets including both send/received: " + str(stats.minPacket))
print("Mean number of packets including both send/received: " + str(stats.meanPacket))
print("Maximum number of packets including both send/received: " + str(stats.maxPacket))
print("")
print("Minimum receive window sizes including both send/received: " + str(stats.minWindow))
print("Mean receive window sizes including both send/received: " + str(stats.meanWindow))
print("Maximum receive window sizes including both send/receive: " + str(stats.maxWindow))
print("___________________________________________________________________________________")
def analyzePacket(stats, connections, packet): #Series of function calls that analyzes all the packets
for connection in connections.links: #Checks whether a connection exists in file for the packet being analyzed
if (connection.srcAdd == packet.srcIP) and (connection.dstAdd == packet.dstIP) and (connection.srcPort == packet.srcPort) and (connection.dstPort == packet.dstPort):
if "SYN" in packet.flags:
connection.status[0] = connection.status[0] + 1 #Update SYN Count
if "FIN" in packet.flags:
connection.status[1] = connection.status[1] + 1 #Update FIN Count
connection.endTime = packet.time #Update END TIME
if "RST" in packet.flags:
connection.status[2] = connection.status[2] + 1 #Update RST Count
connection.window.append(packet.windowSize) #Store Window Size
byteTransfered = updateDstSrcCount(connection, packet) #Calculate if any bytes were sent /received, and store
if "SYN" in packet.flags or "FIN" in packet.flags: #Calculate the RTT if it is SYN or FIN
connection.calRTT = 1
clientInitialRTT(stats, connection, packet)
return 1
#Series of function calls that analyzes all the packets
#Similar to above, but for server -> destination packets
if (connection.dstAdd == packet.srcIP) and (connection.srcAdd == packet.dstIP) and (connection.dstPort == packet.srcPort) and (connection.srcPort == packet.dstPort):
if "SYN" in packet.flags:
connection.status[0] = connection.status[0] + 1
if "FIN" in packet.flags:
connection.status[1] = connection.status[1] + 1
connection.endTime = packet.time
if "RST" in packet.flags:
connection.status[2] = connection.status[2] + 1
connection.window.append(packet.windowSize)
byteTransfered = updateSrcDstCount(connection, packet)
#Only calculate RTT if it is SYN or has data transferred
if ((byteTransfered > 0 and "ACK" in packet.flags) or "SYN" in packet.flags) and connection.calRTT == 1:
connection.calRTT = 0
clientFinalRTT(stats, connection, packet)
return 1
connection = Connection(packet) #If there isn't any existing connection, create a connection
connection.srcDstPacketCount = connection.srcDstPacketCount + 1 #Update packet count
connection.packetCount = connection.packetCount + 1 #
connection.status[0] = connection.status[0] + 1 #Add syn count
stats.openCount = stats.openCount + 1 #Add open count
stats.connCount = stats.connCount + 1 #Add connection count
connection.window.append(packet.windowSize) #Store window size
connections.add(connection)
if "SYN" in packet.flags: #If SYN, prepare values for RTT
connection.calRTT = 1
clientInitialRTT(stats, connection, packet)
return 0
def finalStatCheck(stats, connections): #After analyzing all the packets
for connection in connections.links: #For all connections
if connection.status[0] >= 1: #If SYN
if connection.status[1] >= 1: #If FIN
stats.openCount = stats.openCount - 1 #Complete connection
stats.closeCount = stats.closeCount + 1
connection.duration = connection.endTime - connection.startTime #min mean max duration for all complete connections
stats.duration = stats.duration + connection.duration
if stats.minDuration == 0:
stats.minDuration = connection.duration
if stats.maxDuration == 0:
stats.maxDuration = connection.duration
if connection.duration <= stats.minDuration:
stats.minDuration = connection.duration
if connection.duration >= stats.maxDuration:
stats.maxDuration = connection.duration
stats.pktCount = stats.pktCount + connection.packetCount #min mean max packet count for all complete connections
if stats.minPacket == 0:
stats.minPacket = connection.packetCount
if stats.maxPacket == 0:
stats.maxPacket = connection.packetCount
if connection.packetCount <= stats.minPacket:
stats.minPacket = connection.packetCount
if connection.packetCount >= stats.maxPacket:
stats.maxPacket = connection.packetCount
stats.window.extend(connection.window) #update connection window for min, mean max calculations
stats.RTT.extend(connection.RTT) #add rtt for min mean max calculations
if connection.status[2] >= 1:
stats.rstCount = stats.rstCount + 1
stats.meanDuration = stats.duration / stats.closeCount #mean duration calculation
stats.meanPacket = stats.pktCount / stats.closeCount #mean packet count calculation
stats.minWindow = min(stats.window) #min mean max window size calculation
stats.maxWindow = max(stats.window)
stats.meanWindow = sum(stats.window)/stats.pktCount
stats.minRTT = min(stats.RTT) #min mean max RTT calculation
stats.maxRTT = max(stats.RTT)
stats.meanRTT = sum(stats.RTT) / len(stats.RTT)
return 1
def main():
traceFileName = sys.argv[1] #name of file to read from
traceFile = open(traceFileName, "rb") #open the file to read in binary
tracePcap = dpkt.pcap.Reader(traceFile) #use a reader to parse
stats = Statistics()
connections = Connections()
count = 0
for timeStamp, buf in tracePcap: #Refer to reference. Parts of the referenced code has been deleted or modified.
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data #IP Header
tcp = ip.data #TCP Header
packet = Packet() #Storing various values into a packet class
packet.srcMac = mac_addr(eth.src)
packet.dstMac = mac_addr(eth.dst)
packet.srcIP = inet_to_str(ip.src)
packet.dstIP = inet_to_str(ip.dst)
packet.IPLen = ip.len
packet.id = ip.id
packet.seq = tcp.seq
packet.ack = tcp.ack
packet.windowSize = tcp.win
packet.flagsBin = tcp.flags
packet.srcPort = tcp.sport
packet.dstPort = tcp.dport
packet.time = timeStamp
packet = binToFlags(packet)
analyzePacket(stats, connections, packet) #For each packet, analyze
del packet
finalStatCheck(stats, connections)
printFinal(stats, connections)
main()
# Parsing is taken from the link below, in particluar the mac_addr, and inet_to_str.
# Opening the file, and obtaining the buffer and timestamp is from
# http://dpkt.readthedocs.io/en/latest/_modules/examples/print_packets.html?highlight=print%20ip
| dmahw/CSC_361_TCPTrafficAnalysis | TCPTrafficAnalysis.py | TCPTrafficAnalysis.py | py | 17,576 | python | en | code | 0 | github-code | 6 |
| 34003465685 |
mylist = ["eat", "tea", "tan", "ate", "nat", "bat"]
emptylist = []
for i in mylist:
tempSet = set(i)
templist = []
for j in range(len(mylist)):
if tempSet == tempSet.intersection(set(mylist[j])):
templist.append(mylist[j])
if templist not in emptylist:
emptylist.append(templist)
templist = []
print(emptylist)
| Tommyhappy01/1-IT_FUNDAMENTAL | python/try/anagram v11.py | anagram v11.py | py | 379 | python | en | code | 5 | github-code | 6 |
| 12173424287 |
import numpy as np
def calibrate(input_path, output_path, filename):
X = np.load(input_path + filename + '.npy')
y_gt = np.zeros(X.shape[0], dtype=int)
m = 0
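# Mark the known anomalous index ranges of this series with label 1;
# everything else stays 0 (normal).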
# for i in range(0,1250):
# y_gt[i] = 1
# m += 1
for i in range(9346,13577):
y_gt[i] = 1
m += 1
# for i in range(11512,11712):
# y_gt[i] = 1
# m += 1
for i in range(14069,18355):
y_gt[i] = 1
m += 1
print("保存标定的真实值:" + output_path + filename + '.npy', "测试集异常点数量: ", int(m / 2))
np.save(output_path + filename + '.npy', y_gt)
return
| MarcWong/wavelet | utils/calibrate.py | calibrate.py | py | 629 | python | en | code | 0 | github-code | 6 |
| 39627561667 |
import time
import json
import math
import csv
import serial # conda install pyserial
import sys
import glob
# pygame needs python 3.6, not available for 3.7
import pygame # conda install -c cogsci pygame; maybe because it only is supplied for earlier python, might need conda install -c evindunn pygame ; sudo apt-get install libsdl-ttf2.0-0
import pygame.joystick as joystick # https://www.pygame.org/docs/ref/joystick.html
from datetime import datetime
# our imports
import kbhit
from pendulum import Pendulum
POLOLU_MOTOR = False # set true to set options for this motor, which has opposite sign for set_motor TODO needs fixing in firmware or wiring of motor
SERIAL_PORT = "COM4" #"/dev/ttyUSB0" # might move if other devices plugged in
SERIAL_BAUD = 230400 # default 230400, in firmware. Alternatives if compiled and supported by USB serial intervace are are 115200, 128000, 153600, 230400, 460800, 921600, 1500000, 2000000
PRINT_PERIOD_MS = 100 # shows state every this many ms
CONTROL_PERIOD_MS = 5
CALIBRATE = False #False # important to calibrate if running standalone to avoid motor burnout because limits are determined during this calibration
MOTOR_FULL_SCALE = 7199 # 7199 # with pololu motor and scaling in firmware #7199 # with original motor
MOTOR_MAX_PWM = int(round(0.95 * MOTOR_FULL_SCALE))
JOYSTICK_SCALING = MOTOR_MAX_PWM # how much joystick value -1:1 should be scaled to motor command
JOYSTICK_DEADZONE = 0.05 # deadzone around joystick neutral position that stick is ignored
ANGLE_TARGET = 3129 # 3383 # adjust to exactly vertical angle value, read by inspecting angle output
ANGLE_CTRL_PERIOD_MS = 5 # Must be a multiple of CONTROL_PERIOD_MS
ANGLE_AVG_LENGTH = 4 # adc routine in firmware reads ADC this many times quickly in succession to reduce noise
ANGLE_SMOOTHING = 1 # 1.0 turns off smoothing
ANGLE_KP = 400
ANGLE_KD = 400
POSITION_TARGET = 0 # 1200
POSITION_CTRL_PERIOD_MS = 25 # Must be a multiple of CONTROL_PERIOD_MS
POSITION_SMOOTHING = 1 # 1.0 turns off smoothing
POSITION_KP = 20
POSITION_KD = 300
def serial_ports(): # from https://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
# if cannot open, check permissions
ports = glob.glob('/dev/ttyUSB[0-9]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
def saveparams():
print("\nSaving parameters")
p={}
p['ANGLE_TARGET']=ANGLE_TARGET
p['ANGLE_KP']=ANGLE_KP
p['ANGLE_KD']=ANGLE_KD
p['POSITION_TARGET']=POSITION_TARGET
p['POSITION_KP']=POSITION_KP
p['POSITION_KD']=POSITION_KD
p['ANGLE_SMOOTHING']=ANGLE_SMOOTHING
p['POSITION_SMOOTHING']=POSITION_SMOOTHING
with open('control.json','w') as f:
json.dump(p,f)
def loadparams():
print("\nLoading parameters")
f=open('control.json')
try:
p=json.load(f)
global ANGLE_TARGET, ANGLE_KP,ANGLE_KD,POSITION_TARGET,POSITION_KP,POSITION_KD,ANGLE_SMOOTHING,POSITION_SMOOTHING
ANGLE_TARGET=p['ANGLE_TARGET']
ANGLE_KP=p['ANGLE_KP']
ANGLE_KD=p['ANGLE_KD']
POSITION_TARGET=p['POSITION_TARGET']
POSITION_KP=p['POSITION_KP']
POSITION_KD=p['POSITION_KD']
ANGLE_SMOOTHING=p['ANGLE_SMOOTHING']
POSITION_SMOOTHING=p['POSITION_SMOOTHING']
except:
print("something went wrong loading parameters")
printparams()
def help():
print("\n***********************************")
print("keystroke commands")
print("ESC quit")
print("k toggle control on/off (initially off)")
print("K trigger motor position calibration")
print("=/- increase/decrease angle target")
print("[/] increase/decrease position target")
print("w/q angle proportional gain")
print("s/a angle derivative gain")
print("z/x angle smoothing")
print("r/e position proportional gain")
print("f/d position derivative gain")
print("c/v position smoothing")
print("l toggle logging data")
print("S/L Save/Load param values from disk")
print("D Toggle dance mode")
print("***********************************")
def printparams():
print("\nAngle PD Control Parameters")
print(" Set point {0}".format(ANGLE_TARGET))
print(" Average Length {0}".format(ANGLE_AVG_LENGTH))
print(" Smoothing {0:.2f}".format(ANGLE_SMOOTHING))
print(" P Gain {0:.2f}".format(ANGLE_KP))
print(" D Gain {0:.2f}".format(ANGLE_KD))
print("Position PD Control Parameters")
print(" Set point {0}".format(POSITION_TARGET))
print(" Control Period {0} ms".format(POSITION_CTRL_PERIOD_MS))
print(" Smoothing {0:.2f}".format(POSITION_SMOOTHING))
print(" P Gain {0:.2f}".format(POSITION_KP))
print(" D Gain {0:.2f}".format(POSITION_KD))
ratio=1.05
def inc(param):
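# Small values (< 2) step linearly by 0.1; larger values scale geometrically
# by `ratio`, with a +/-1 fallback so rounding always changes the value.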
if param < 2:
param = round(param + 0.1, 1)
else:
old=param
param = round(param*ratio)
if param==old:
param+=1
return param
def dec(param):
if param < 2:
param = max(0,round(param - 0.1, 1))
else:
old=param
param = round(param/ratio)
if param==old:
param-=1
return param
if ANGLE_CTRL_PERIOD_MS < CONTROL_PERIOD_MS or POSITION_CTRL_PERIOD_MS <CONTROL_PERIOD_MS:
raise Exception("angle or position control periods too short compared to CONTROL_PERIOD_MS")
# check that we are running from terminal, otherwise we cannot control it
if sys.stdin.isatty():
# running interactively
print('running interactively from an interactive terminal, ok')
else:
print('run from an interactive terminal to allow keyboard input')
quit()
################################################################################
# OPEN SERIAL PORT
################################################################################
p = Pendulum()
serialPorts=serial_ports()
print('Available serial ports: '+str(serialPorts))
if len(serialPorts)==0:
print('no serial ports available, or cannot open it; check linux permissions\n Under linux, sudo chmod a+rw [port] transiently, or add user to dialout or tty group')
quit()
if len(serialPorts)>1:
print(str(len(serialPorts))+' serial ports, taking first one which is '+str(serialPorts[0]))
SERIAL_PORT=str(serialPorts[0])
try:
p.open(SERIAL_PORT, SERIAL_BAUD)
except:
print('cannot open port '+str(SERIAL_PORT)+': available ports are '+str(serial_ports()))
quit()
print('opened '+str(SERIAL_PORT)+' successfully')
p.control_mode(False)
p.stream_output(False)
joystickExists=False
pygame.init()
joystick.init()
if joystick.get_count()==1:
stick = joystick.Joystick(0)
stick.init()
axisNum = stick.get_numaxes()
buttonNum = stick.get_numbuttons()
joystickExists=True
print('joystick found with '+str(axisNum)+' axes and '+str(buttonNum)+' buttons')
else:
print('no joystick found, only PD control or no control possible')
if CALIBRATE:
print("Calibrating motor position....")
if not p.calibrate():
print("Failed to connect to device")
p.close()
exit()
print("Done calibrating")
loadparams()
time.sleep(1)
################################################################################
# SET PARAMETERS
################################################################################
p.set_angle_config( ANGLE_TARGET,
ANGLE_AVG_LENGTH,
ANGLE_SMOOTHING,
ANGLE_KP,
ANGLE_KD)
p.set_position_config( POSITION_TARGET,
POSITION_CTRL_PERIOD_MS,
POSITION_SMOOTHING,
POSITION_KP,
POSITION_KD)
################################################################################
# GET PARAMETERS
################################################################################
( ANGLE_TARGET,
ANGLE_AVG_LENGTH,
ANGLE_SMOOTHING,
ANGLE_KP,
ANGLE_KD) = p.get_angle_config()
( POSITION_TARGET,
POSITION_CTRL_PERIOD_MS,
POSITION_SMOOTHING,
POSITION_KP,
POSITION_KD) = p.get_position_config()
################################################################################
# CONTROL LOOP (PC BASED)
################################################################################
printCount = 0
angleErrPrev = 0
angleCmd = 0
positionErrPrev = 0
positionCmd = 0
controlEnabled=False
danceEnabled=False
danceAmpl=500
dancePeriodS=8
loggingEnabled=False
kbAvailable=True
try:
kb = kbhit.KBHit() # can only use in posix terminal; cannot use from spyder ipython console for example
except:
kbAvailable=False
printparams()
help()
startTime=time.time()
lastTime=startTime
lastAngleControlTime=lastTime
lastPositionControlTime=lastTime
angleErr=0
positionErr=0 # for printing even if not controlling
p.stream_output(True) # now start streaming state
while True:
# Adjust Parameters
if kbAvailable & kb.kbhit():
c = kb.getch()
if c=='D':
danceEnabled=~danceEnabled
print("\ndanceEnabled= {0}".format(danceEnabled))
elif c == 'l':
loggingEnabled=~loggingEnabled
print("\nloggingEnabled= {0}".format(loggingEnabled))
if loggingEnabled:
try:
csvfilename=datetime.now().strftime("cartpole-%Y-%m-%d-%H-%M-%S.csv")
csvfile=open(csvfilename, 'w', newline='')
csvwriter = csv.writer(csvfile, delimiter=',')
csvwriter.writerow(['time'] + ['deltaTimeMs']+['angle'] + ['position'] + ['angleTarget'] + ['angleErr'] + ['positionTarget'] + ['positionErr'] + ['angleCmd'] + ['positionCmd'] + ['motorCmd']+['actualMotorCmd'])
print("\n Started logging data to "+csvfilename)
except Exception as e:
loggingEnabled=False
print("\n" + str(e) + ": Exception opening csvfile; logging disabled")
else:
csvfile.close()
print("\n Stopped logging data to "+csvfilename)
elif c == 'k':
controlEnabled=~controlEnabled
print("\ncontrolEnabled= {0}".format(controlEnabled))
elif c == 'K':
controlEnabled=False
print("\nCalibration triggered")
p.calibrate()
print("\nCalibration finished")
elif c == 'h' or c=='?':
help()
elif c == 'p' :
printparams()
# Increase Target Angle
elif c == '=':
ANGLE_TARGET += 1
print("\nIncreased target angle to {0}".format(ANGLE_TARGET))
# Decrease Target Angle
elif c == '-':
ANGLE_TARGET -= 1
print("\nDecreased target angle to {0}".format(ANGLE_TARGET))
# Increase Target Position
elif c == ']':
POSITION_TARGET += 200
print("\nIncreased target position to {0}".format(POSITION_TARGET))
# Decrease Target Position
elif c == '[':
POSITION_TARGET -= 200
print("\nDecreased target position to {0}".format(POSITION_TARGET))
# Angle Gains
elif c == 'w':
ANGLE_KP=inc(ANGLE_KP)
print("\nIncreased angle KP {0}".format(ANGLE_KP))
elif c == 'q':
ANGLE_KP=dec(ANGLE_KP)
print("\nDecreased angle KP {0}".format(ANGLE_KP))
elif c == 's':
ANGLE_KD=inc(ANGLE_KD)
print("\nIncreased angle KD {0}".format(ANGLE_KD))
elif c == 'a':
ANGLE_KD=dec(ANGLE_KD)
print("\nDecreased angle KD {0}".format(ANGLE_KD))
elif c == 'x':
ANGLE_SMOOTHING=dec(ANGLE_SMOOTHING)
if ANGLE_SMOOTHING>1:
ANGLE_SMOOTHING=1
print("\nIncreased ANGLE_SMOOTHING {0}".format(ANGLE_SMOOTHING))
elif c == 'z':
ANGLE_SMOOTHING=inc(ANGLE_SMOOTHING)
if ANGLE_SMOOTHING>1:
ANGLE_SMOOTHING=1
print("\nDecreased ANGLE_SMOOTHING {0}".format(ANGLE_SMOOTHING))
# Position Gains
elif c == 'r':
POSITION_KP=inc(POSITION_KP)
print("\nIncreased position KP {0}".format(POSITION_KP))
elif c == 'e':
POSITION_KP=dec(POSITION_KP)
print("\nDecreased position KP {0}".format(POSITION_KP))
elif c == 'f':
POSITION_KD=inc(POSITION_KD)
print("\nIncreased position KD {0}".format(POSITION_KD))
elif c == 'd':
POSITION_KD=dec(POSITION_KD)
print("\nDecreased position KD {0}".format(POSITION_KD))
elif c == 'v':
POSITION_SMOOTHING=dec(POSITION_SMOOTHING)
if POSITION_SMOOTHING>1:
POSITION_SMOOTHING=1
print("\nIncreased POSITION_SMOOTHING {0}".format(POSITION_SMOOTHING))
elif c == 'c':
POSITION_SMOOTHING=inc(POSITION_SMOOTHING)
if POSITION_SMOOTHING>1:
POSITION_SMOOTHING=1
print("\nDecreased POSITION_SMOOTHING {0}".format(POSITION_SMOOTHING))
elif c=='S':
saveparams()
elif c=='L':
loadparams()
# Exit
elif ord(c) == 27 : # ESC
print("\nquitting....")
break
# This function will block at the rate of the control loop
# p.clear_read_buffer() # if we don't clear read buffer, state output piles up in serial buffer
(angle, position, command) = p.read_state()
# angle count is more positive CCW facing cart, position encoder count is more positive to right facing cart
timeNow=time.time()
deltaTime=timeNow-lastTime
if deltaTime==0:
deltaTime=1e-6
lastTime=timeNow
elapsedTime=timeNow-startTime
diffFactor=(CONTROL_PERIOD_MS/(deltaTime*1000))
positionTargetNow=POSITION_TARGET
if controlEnabled and danceEnabled:
positionTargetNow=POSITION_TARGET+danceAmpl*math.sin(2*math.pi*(elapsedTime/dancePeriodS))
# Balance PD Control
# Position PD Control
if timeNow -lastPositionControlTime >= POSITION_CTRL_PERIOD_MS*.001:
lastPositionControlTime=timeNow
        positionErr = POSITION_SMOOTHING*(position - positionTargetNow) + (1.0 - POSITION_SMOOTHING)*positionErrPrev # first-order low-pass filter
positionErrDiff = (positionErr - positionErrPrev)*diffFactor
positionErrPrev = positionErr
        # Naive approach: if the cart is too far right (positive error), drive it left
        # (negative positionCmd). That alone does not stabilize the system.
        # Correct strategy: if the cart is too far right, make the pole lean slightly left by
        # shifting the effective angle set point, i.e. a more positive positionErr raises the
        # effective ANGLE_TARGET. The net effect is that the sign of positionCmd is flipped.
        # Likewise, if positionErr is still growing, we want even more lean, so the D term is also positive.
positionCmd = +(POSITION_KP*positionErr + POSITION_KD*positionErrDiff)
if timeNow-lastAngleControlTime >= ANGLE_CTRL_PERIOD_MS*.001:
lastAngleControlTime=timeNow
angleErr = ANGLE_SMOOTHING*(angle - ANGLE_TARGET) + (1.0 - ANGLE_SMOOTHING)*angleErrPrev # First order low-pass filter
angleErrDiff = (angleErr - angleErrPrev)*diffFactor # correct for actual sample interval; if interval is too long, reduce diff error
angleErrPrev = angleErr
angleCmd = -(ANGLE_KP*angleErr + ANGLE_KD*angleErrDiff) # if too CCW (pos error), move cart left
motorCmd = int(round(angleCmd + positionCmd)) # change to plus for original, check that when cart is displayed, the KP term for cart position leans cart the correct direction
motorCmd = MOTOR_MAX_PWM if motorCmd > MOTOR_MAX_PWM else motorCmd
motorCmd = -MOTOR_MAX_PWM if motorCmd < -MOTOR_MAX_PWM else motorCmd
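    # Worked example of the combined PD command (illustrative gains and errors only, not
    # values measured on this rig):
    #   ANGLE_KP=80, ANGLE_KD=400, angleErr=+3, angleErrDiff=+0.05   -> angleCmd    = -(80*3 + 400*0.05) = -260
    #   POSITION_KP=0.5, POSITION_KD=4, positionErr=+200, positionErrDiff=+1 -> positionCmd = +(0.5*200 + 4*1) = +104
    #   motorCmd = round(-260 + 104) = -156, then clamped to +/-MOTOR_MAX_PWM as above.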
stickPos=0.0
if joystickExists:
# for event in pygame.event.get(): # User did something.
# if event.type == pygame.QUIT: # If user clicked close.
# done = True # Flag that we are done so we exit this loop.
# elif event.type == pygame.JOYBUTTONDOWN:
# print("Joystick button pressed.")
# elif event.type == pygame.JOYBUTTONUP:
# print("Joystick button released.")
pygame.event.get() # must call get() to handle internal queue
stickPos=stick.get_axis(0) # 0 left right, 1 front back 2 rotate
if abs(stickPos)>JOYSTICK_DEADZONE:
actualMotorCmd=int(round(stickPos*JOYSTICK_SCALING))
elif controlEnabled:
actualMotorCmd=motorCmd
else:
actualMotorCmd=0
    if not POLOLU_MOTOR:
        p.set_motor(-actualMotorCmd) # sign flipped for this driver so a positive command still moves the cart right
    else:
        p.set_motor(actualMotorCmd) # positive motor cmd moves cart right
if loggingEnabled:
# csvwriter.writerow(['time'] + ['deltaTimeMs']+['angle'] + ['position'] + ['angleErr'] + ['positionErr'] + ['angleCmd'] + ['positionCmd'] + ['motorCmd'])
csvwriter.writerow([elapsedTime,deltaTime*1000,angle, position, ANGLE_TARGET, angleErr, positionTargetNow, positionErr, angleCmd,positionCmd,motorCmd,actualMotorCmd])
# Print output
printCount += 1
if printCount >= (PRINT_PERIOD_MS/CONTROL_PERIOD_MS):
printCount = 0
print("\r angle {:+4d} angleErr {:+6.1f} position {:+6d} positionErr {:+6.1f} angleCmd {:+6d} positionCmd {:+6d} motorCmd {:+6d} dt {:.3f}ms stick {:.3f} \r".format(int(angle), angleErr, int(position), positionErr, int(round(angleCmd)), int(round(positionCmd)), actualMotorCmd, deltaTime*1000, stickPos), end = '')
# if we pause like below, state info piles up in serial input buffer
# instead loop at max possible rate to get latest state info
# time.sleep(CONTROL_PERIOD_MS*.001) # not quite correct since there will be time for execution below
# when x hit during loop or other loop exit
p.set_motor(0) # turn off motor
p.close()
joystick.quit()
if loggingEnabled:
csvfile.close()
| SensorsINI/DeltaGRU-cartpole | cartpole_robot/python_controller/control.py | control.py | py | 19,414 | python | en | code | 0 | github-code | 6 | 7661449629 |
import numpy as np
import netCDF4
from datetime import datetime, timedelta
from glob import glob
import os, sys
"""
This program is used to read input data.
"""
#******************************************
# Edit here (input file directories)
#------------------------------------------
slpbasedir = "/mnt/nas02/data/CMIP6"
tabasedir = "/mnt/nas02/data/CMIP6"
uabasedir = "/mnt/nas02/data/CMIP6"
vabasedir = "/mnt/nas02/data/CMIP6"
tsbasedir = "/mnt/nas02/data/CMIP6"
topobasedir= "/mnt/nas02/data/CMIP6"
landbasedir= "/mnt/nas02/data/CMIP6"
prbasedir = "/mnt/nas02/data/CMIP6" # not used for detection
#******************************************
dbasedir = {
"slp":slpbasedir,
"ta" :tabasedir,
"ua" :uabasedir,
"va" :vabasedir,
"sst":tsbasedir,
"topo":topobasedir,
"land":landbasedir,
"pr" :prbasedir, # not used for detection
}
dvar = {
"slp":"psl",
"ta" :"ta",
"ua" :"ua",
"va" :"va",
"sst":"ts",
"topo":"orog",
"land":"sftlf",
"pr" :"pr", # not used for detection
}
def ret_lats(model):
return np.load(slpbasedir + "/%s/lat.npy"%(model))
# MIROC6: -88.92773535 ~ 88.92773535, d=~1.4007664
def ret_lons(model):
return np.load(slpbasedir + "/%s/lon.npy"%(model))
# MIROC6: 0 ~ 358.59375, d=1.40625
def ret_ny(model):
return len(ret_lats(model))
def ret_nx(model):
return len(ret_lons(model))
# MIROC6: (128, 256)
def ret_miss(model):
    modelname = model.split(".")[0]
    if modelname=="MIROC6": miss_in= 9.969209968386869e+36
    elif modelname=="MRI-ESM2-0": miss_in= 9.969209968386869e+36
    elif modelname=="MPI-ESM1-2-HR":miss_in= 9.969209968386869e+36
    else: raise ValueError("unknown model: "+modelname)  # fail clearly instead of returning an undefined value
    return miss_in
def Load_6hrPlev(model, var, DTime, plev):
modelname, expr, ens = model.split(".")
vname = dvar[var]
iplev = [850, 500, 250].index(plev)
# Search file
srcdir = dbasedir[var] + "/%s"%(model)
ssearch = srcdir + "/%s_6hrPlev*.nc"%(vname)
lsrcpath = glob(ssearch)
for srcpath in lsrcpath:
stime = os.path.basename(srcpath).split("_")[6].split(".")[0]
stime0, stime1 = stime.split("-")
dtime0 = datetime.strptime(stime0, "%Y%m%d%H%M")
dtime1 = datetime.strptime(stime1, "%Y%m%d%H%M")
if (dtime0<=DTime)&(DTime<=dtime1):
break
nc = netCDF4.Dataset(srcpath)
#print(nc.variables)
#print(srcpath)
# Find time index
basetime = {
("MIROC6","piControl"): datetime(3200,1,1),
("MRI-ESM2-0","piControl"): datetime(1850,1,1),
("MRI-ESM2-0","historical"): datetime(1850,1,1),
("MPI-ESM1-2-HR","piControl"): datetime(1850,1,1),
}[modelname,expr]
dtime0 = basetime + timedelta(days=float(nc.variables["time"][0]))
idxtime = int((DTime - dtime0).total_seconds()/21600) # 6-hour = 21600 sec
#print(DTime, dtime0)
#print(idxtime)
return nc.variables[vname][idxtime, iplev]
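# Illustrative usage sketch (the model/ensemble string and date below are assumptions for
# demonstration, not taken from a real run):
#   ta850 = Load_6hrPlev("MIROC6.piControl.r1i1p1f1", "ta", datetime(3210, 1, 1, 6), 850)
# returns the 2-D (lat, lon) field for the 850 hPa level at that 6-hourly time step.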
def Load_6hrSfc(model, var, DTime):
modelname, expr, ens = model.split(".")
vname = dvar[var]
# Search file
srcdir = dbasedir[var] + "/%s"%(model)
ssearch = srcdir + "/%s_6hrPlev*.nc"%(vname)
lsrcpath = np.sort(glob(ssearch))
for srcpath in lsrcpath:
stime = os.path.basename(srcpath).split("_")[6].split(".")[0]
stime0, stime1 = stime.split("-")
dtime0 = datetime.strptime(stime0, "%Y%m%d%H%M")
dtime1 = datetime.strptime(stime1, "%Y%m%d%H%M")
if (dtime0<=DTime)&(DTime<=dtime1):
break
nc = netCDF4.Dataset(srcpath)
#print(nc.variables)
#print(srcpath)
# Find time index
basetime = {
("MIROC6","piControl"): datetime(3200,1,1),
("MRI-ESM2-0","piControl"): datetime(1850,1,1),
("MRI-ESM2-0","historical"): datetime(1850,1,1),
}[modelname,expr]
dtime0 = basetime + timedelta(days=float(nc.variables["time"][0]))
idxtime = int((DTime - dtime0).total_seconds()/21600) # 6-hour = 21600 sec
return nc.variables[vname][idxtime]
#return nc.variables[vname].shape
def Load_monSfc(model, var, Year, Mon):
modelname, expr, ens = model.split(".")
vname = dvar[var]
DTime = datetime(Year,Mon,1)
# Search file
srcdir = dbasedir[var] + "/%s"%(model)
ssearch = srcdir + "/%s_Amon*.nc"%(vname)
lsrcpath = np.sort(glob(ssearch))
for srcpath in lsrcpath:
stime = os.path.basename(srcpath).split("_")[6].split(".")[0]
stime0, stime1 = stime.split("-")
dtime0 = datetime.strptime(stime0, "%Y%m")
dtime1 = datetime.strptime(stime1, "%Y%m")
if (dtime0<=DTime)&(DTime<=dtime1):
break
nc = netCDF4.Dataset(srcpath)
#print(nc.variables)
#print(srcpath)
#print(nc.variables["time"][:])
#print(len(nc.variables["time"][:]))
# Find time index
Year0,Mon0 = dtime0.timetuple()[:2]
Year1,Mon1 = dtime1.timetuple()[:2]
idxtime = int(Year-Year0)*12 -Mon0 + Mon
#print(idxtime)
return nc.variables[vname][idxtime]
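# Worked index example for Load_monSfc (hypothetical file starting 185001): requesting
# Year=1852, Mon=3 gives idxtime = (1852-1850)*12 - 1 + 3 = 26, i.e. the record for the
# 27th month counting Jan 1850 as the 1st.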
def Load_const(model, var):
vname = dvar[var]
srcdir = dbasedir[var] + "/%s"%(model)
ssearch = srcdir + "/%s_*.nc"%(vname)
lsrcpath= glob(ssearch)
srcpath = lsrcpath[0]
nc = netCDF4.Dataset(srcpath)
#print(nc.variables)
return nc.variables[vname][:]
| nbykutsumi/wsd | dataloader_CMIP6.py | dataloader_CMIP6.py | py | 5,339 | python | en | code | 1 | github-code | 6 | 41685726399 |
import boto3
import sys
import time
# input value 'ansible-controller' while running the instance
#import json
ec2_client = boto3.client('ec2', region_name = "us-east-1")
instances = ec2_client.describe_instances()
for reservation in instances['Reservations']:
for instance in reservation["Instances"]:
if instance["Tags"][0]["Value"] == sys.argv[1]:
response = ec2_client.start_instances(InstanceIds=[instance["InstanceId"]])
            instance_id = instance["InstanceId"]
while True:
print('checking the status........')
time.sleep(3)
    status_value = ec2_client.describe_instance_status(InstanceIds=[instance_id])
if len(status_value['InstanceStatuses']) != 0:
if status_value['InstanceStatuses'][0]['InstanceState']['Name'] == 'running':
print(f"{sys.argv[1]} EC2 Instance got:- {status_value['InstanceStatuses'][0]['InstanceState']['Name']}")
break
print ("Press Enter to continue ..." )
input()
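# A shorter polling sketch using a built-in boto3 waiter (untested here; assumes the
# default waiter delay and retry count are acceptable):
#   waiter = ec2_client.get_waiter('instance_running')
#   waiter.wait(InstanceIds=[instance_id])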
| sudhann92/project-repo | aws-python/aws-boto-start-instance.py | aws-boto-start-instance.py | py | 979 | python | en | code | 0 | github-code | 6 | 12300903394 |
import numpy as np
from sklearn.datasets import make_classification
import pytest
from pygbm.binning import BinMapper
from pygbm.grower import TreeGrower
from pygbm import GradientBoostingRegressor
from pygbm import GradientBoostingClassifier
X, y = make_classification(n_samples=150, n_classes=2, n_features=5,
n_informative=3, n_redundant=0,
random_state=0)
def test_plot_grower(tmpdir):
pytest.importorskip('graphviz')
from pygbm.plotting import plot_tree
X_binned = BinMapper().fit_transform(X)
gradients = np.asarray(y, dtype=np.float32).copy()
hessians = np.ones(1, dtype=np.float32)
grower = TreeGrower(X_binned, gradients, hessians, max_leaf_nodes=5)
grower.grow()
filename = tmpdir.join('plot_grower.pdf')
plot_tree(grower, view=False, filename=filename)
assert filename.exists()
def test_plot_estimator(tmpdir):
pytest.importorskip('graphviz')
from pygbm.plotting import plot_tree
n_trees = 3
est = GradientBoostingRegressor(max_iter=n_trees)
est.fit(X, y)
for i in range(n_trees):
filename = tmpdir.join('plot_predictor.pdf')
plot_tree(est, tree_index=i, view=False, filename=filename)
assert filename.exists()
def test_plot_estimator_and_lightgbm(tmpdir):
pytest.importorskip('graphviz')
lightgbm = pytest.importorskip('lightgbm')
from pygbm.plotting import plot_tree
n_classes = 3
X, y = make_classification(n_samples=150, n_classes=n_classes,
n_features=5, n_informative=3, n_redundant=0,
random_state=0)
n_trees = 3
est_pygbm = GradientBoostingClassifier(max_iter=n_trees,
n_iter_no_change=None)
est_pygbm.fit(X, y)
est_lightgbm = lightgbm.LGBMClassifier(n_estimators=n_trees)
est_lightgbm.fit(X, y)
n_total_trees = n_trees * n_classes
for i in range(n_total_trees):
filename = tmpdir.join('plot_mixed_predictors.pdf')
plot_tree(est_pygbm, est_lightgbm=est_lightgbm, tree_index=i,
view=False, filename=filename)
assert filename.exists()
| ogrisel/pygbm | tests/test_plotting.py | test_plotting.py | py | 2,201 | python | en | code | 175 | github-code | 6 | 71050106107 |
__doc__ = """
=====================
Plugs Introduction
=====================
:Author: Limodou <[email protected]>
.. contents::
About Plugs
----------------
Plugs is an apps collection project for uliweb. So you can use any app of it
to compose your project.
License
------------
Plugs is released under BSD license. Of cause if there are some third party
apps not written by me(limodou), it'll under the license of itself.
"""
from uliweb.utils.setup import setup
import plugs
setup(name='plugs',
version=plugs.__version__,
description="Apps collection for uliweb",
long_description=__doc__,
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Programming Language :: Python",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
],
packages = ['plugs'],
platforms = 'any',
keywords='wsgi web framework',
author=plugs.__author__,
author_email=plugs.__author_email__,
url=plugs.__url__,
license=plugs.__license__,
include_package_data=True,
zip_safe=False,
entry_points = {
'uliweb_apps': [
'helpers = plugs',
],
},
)
| limodou/plugs | setup.py | setup.py | py | 1,424 | python | en | code | 23 | github-code | 6 | 38958617780 |
from unittest import mock
from django.test import TestCase
from django.urls import resolve, reverse
from nltk import word_tokenize
from .models import ScrapeHistory
from .views import WordCountView
class ScrapeTest(TestCase):
def _mock_response(
self,
status=200,
content="CONTENT",
json_data=None,
raise_for_status=None):
mock_resp = mock.Mock()
# mock raise_for_status call w/optional error
mock_resp.raise_for_status = mock.Mock()
if raise_for_status:
mock_resp.raise_for_status.side_effect = raise_for_status
# set status code and content
mock_resp.status_code = status
mock_resp.content = content
# add json data if provided
if json_data:
mock_resp.json = mock.Mock(
return_value=json_data
)
return mock_resp
def setUp(self):
self.url = reverse("wordcount")
self.post_params = {
"word": "fit",
"url": "https://www.virtusize.jp/"
}
def test_wordcount_url_resolves(self):
self.assertEqual(resolve(self.url).func.view_class, WordCountView)
def test_missing_param(self):
params = {
"word": "ABC"
}
response = self.client.post(
self.url, params, content_type='application/json')
self.assertEqual(response.status_code, 400)
def test_invalid_link_value(self):
params = {
"word": "WORd",
"url": "https://virtuse"
}
response = self.client.post(
self.url, params, content_type='application/json')
self.assertEqual(response.status_code, 500)
@mock.patch("scrape.views.requests.get")
def test_wordcount_request_to_url(self, m):
mock_resp = self._mock_response(
content="<html><body>this is fit</body></html>")
m.return_value = mock_resp
response = WordCountView.scrap_url_and_word_count(self,
"fit", "https://www.virtusize.jp/")
self.assertEqual(response, 1)
@mock.patch("scrape.views.WordCountView.scrap_url_and_word_count", return_value=1)
def test_wordcount_success_result(self, m):
response = self.client.post(
self.url, self.post_params, content_type='application/json')
result = response.json()
test_content = {
"status": "ok",
"count": 1
}
self.assertEqual(result, test_content)
@mock.patch("scrape.views.WordCountView.scrap_url_and_word_count", return_value=1)
def test_save_to_db(self, m):
_ = ScrapeHistory.objects.create(
url="https://www.virtusize.jp/",
word="fit",
word_count=1
)
_ = self.client.post(
self.url, self.post_params, content_type='application/json')
self.assertEqual(ScrapeHistory.objects.last().word_count, 1)
self.assertNotEqual(ScrapeHistory.objects.last().word_count, 13)
def test_word_tokenize(self):
txt = "<html><body>Virtusize works for innovative idea. <'new idea'> idea-ly Although there are lot of new ideas but it focuses e-commerce</body></html>"
words = word_tokenize(txt)
self.assertEqual(words.count("idea"), 2)
self.assertNotEqual(words.count("idea"), 1)
| iqbalalo/word_counter | src/scrape/tests.py | tests.py | py | 3,441 | python | en | code | 0 | github-code | 6 | 9109600438 |
import sys
from PyQt5.QtWidgets import *
class Main(QDialog):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
main_layout=QVBoxLayout()
        # create the button
btn = QPushButton("Click me")
main_layout.addWidget(btn)
self.setLayout(main_layout)
self.resize(500,500)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
main = Main()
sys.exit(app.exec_())
| kangminzu/battleship | battleship/import sys.py | import sys.py | py | 527 | python | en | code | 0 | github-code | 6 | 69997166908 |
import time
def test1():
now = 1543786615000
time_array = time.localtime(now / 1000)
other_style_time = time.strftime("%Y--%m--%d %H:%M:%S", time_array)
print(other_style_time)
def test2():
k = None
if isinstance(k, dict):
print(111)
else:
print(222)
def test3():
condition = False
# if not condition:
# raise AssertionError()
# assert condition
lists = ["", "", ""]
    assert len(lists) >= 3, 'list has fewer than 3 elements'
if __name__ == '__main__':
test3()
| HasakiWMC/LOL_project | src/webserver/test/test.py | test.py | py | 538 | python | en | code | 0 | github-code | 6 | 71951872187 |
class Persona():
def __init__(self, nombre, edad, lugar):
self.nombre = nombre
self.edad = edad
self.lugar = lugar
def descripcion(self):
print("Nombre: ", self.nombre, " Edad: ", self.edad, " Lugar: ", self.lugar)
class Empleado(Persona):
def __init__(self, nombre_empleado, edad_empleado, lugar_empleado, sueldo, antiguedad):
super().__init__(nombre_empleado, edad_empleado, lugar_empleado)
self.sueldo = sueldo
self.antiguedad = antiguedad
def descripcion(self):
super().descripcion()
print("Sueldo: ", self.sueldo, "Antigüedad: ", self.antiguedad)
Antonio = Persona('Antonio', '54', 'Canarias')
Antonio.descripcion()
Miguel = Empleado('Miguel', '37', 'Valencia', '35.000', '12')
Miguel.descripcion()
print(isinstance(Miguel, Empleado))
| xanpena/phython-sintaxis | objetos/herencia_super.py | herencia_super.py | py | 854 | python | es | code | 0 | github-code | 6 | 25653326394 |
from socket import NI_NAMEREQD
import sw_utils as utl
def assign_crew_members(starship, crew_positions, personnel):
"""Maps crew members by position to the passed in < starship > 'crew_members'
key. Both the < crew_positions > and < personnel > lists should contain the same number
of elements. The individual < crew_positions > and < personnel > elements are then paired
by index position and stored in a dictionary structured as follows:
{< crew_position[0] >: < personnel[0] >, < crew_position[1] >: < personnel[1] >, ...}
The crew members dictionary is mapped (i.e., assigned) to the passed in
starship's 'crew_members' key and the crewed starship is returned to the caller.
WARN: The number of crew members that can be assigned to the passed in < starship > is
limited by the starship's "crew_size" value. No additional crew members are permitted
to be assigned to the < starship >.
WARN: A single line dictionary comprehension that assigns a new dictionary to the passed in
< starship >'s "crew_members" key must be written in order to earn full credit. Utilize the
parameter names in the dictionary comprehension (DO NOT assign the passed in dictionary
and lists to local variables and then reference them in the comprehension).
Parameters:
starship (dict): Representation of a starship
crew_positions (list): crew positions (e.g., 'pilot', 'copilot', etc.)
personnel (list): persons to be assigned to the crew positions
Returns:
dict: starship with assigned crew members
"""
starship['crew_members'] = {crew_positions[i]: personnel[i] for i in range(starship['crew_size'])}
return starship
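# Illustrative call with made-up data (not SWAPI records):
#   ship = {'crew_size': 2, 'crew_members': None}
#   assign_crew_members(ship, ['pilot', 'copilot'], ['Anakin', 'Obi-Wan'])
#   -> ship['crew_members'] == {'pilot': 'Anakin', 'copilot': 'Obi-Wan'}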
def board_passengers(starship, passengers):
"""Assigns < passengers > to the passed in < starship > but limits boarding to less than
or equal to the starship's "max_passengers" value. The passengers list (in whole or in part)
is then mapped (i.e., assigned) to the passed in starship's 'passengers_on_board' key. After
boarding the passengers the starship is returned to the caller.
WARN: The number of passengers permitted to board a starship is limited by the starship's
"max_passengers" value. If the number of passengers attempting to board exceeds the starship's
"max_passengers" value only the first n passengers (where `n` = "max_passengers") are
permitted to board the vessel.
Parameters:
starship (dict): Representation of a starship
passengers (list): passengers to transport aboard starship
Returns:
dict: starship with assigned passengers
"""
n = int(starship['max_passengers'])
if len(passengers) > n:
starship['passengers_on_board'] = passengers[0:n]
else:
starship['passengers_on_board'] = passengers
return starship
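# Illustrative call with made-up data: a starship with max_passengers = 2 boards only the
# first two of three would-be passengers:
#   ship = {'max_passengers': 2, 'passengers_on_board': None}
#   board_passengers(ship, ['Padme', 'C-3PO', 'R2-D2'])
#   -> ship['passengers_on_board'] == ['Padme', 'C-3PO']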
def calculate_articles_mean_word_count(articles):
"""Calculates the mean (e.g., average) word count of the passed in list of < articles >.
Excludes from the calculation any article with a word count of zero (0). Word counts
are summed and then divided by the number of non-zero word count articles. The resulting mean
value is rounded to the second (2nd) decimal place and returned to the caller.
WARN: Add a local variable to hold a count of the number of articles with a word count of
zero (0). Then subtract the zero word count from the total number of passed in articles in
order to ensure that the divisor reflects the actual number of articles upon which to
compute the mean.
Parameters:
articles (list): nested dictionary representations of New York Times articles
Returns:
float: mean word count rounded to the second (2nd) decimal place
"""
zero_count = 0
word_count = 0
for article in articles:
if article['word_count'] == 0:
zero_count += 1
else:
word_count = word_count + article['word_count']
mean_word_count = round(word_count / (len(articles) - zero_count), 2)
return mean_word_count
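# Worked example with made-up word counts: articles of 100, 0, and 300 words give
# (100 + 300) / (3 - 1) = 200.0, because the zero-word article is excluded from the divisor.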
def convert_episode_values(episodes):
"""Converts select string values to either int, float, list, or None in the passed in list of
nested dictionaries. The function delegates to the < utl.convert_to_* > functions the task of
converting the specified strings to either int, float, or list (or None if utl.convert_to_none
is eventually called).
Conversions:
str to int: 'series_season_num', 'series_episode_num', 'season_episode_num'
str to float: 'episode_prod_code', 'episode_us_viewers_mm'
str to list: 'episode_writers'
Parameters:
episodes (list): nested episode dictionaries
Returns:
list: nested episode dictionaries containing mutated key-value pairs
"""
for episode in episodes:
for key, val in episode.items():
if key in ('series_season_num', 'series_episode_num', 'season_episode_num'):
episode[key] = utl.convert_to_int(val)
elif key in ('episode_prod_code', 'episode_us_viewers_mm'):
episode[key] = utl.convert_to_float(val)
elif key == 'episode_writers':
episode[key] = utl.convert_to_list(val, ', ')
return episodes
def count_episodes_by_director(episodes):
"""Constructs and returns a dictionary of key-value pairs that associate each director with a
count of the episodes that they directed. The director's name comprises the key and the
associated value a count of the number of episodes they directed. Duplicate keys are NOT
permitted.
Format:
{
< director_name_01 >: < episode_count >,
< director_name_02 >: < episode_count >,
...
}
Parameters:
episodes (list): nested episode dictionaries
Returns:
dict: a dictionary that store counts of the number of episodes directed
by each director
"""
director_dict = {}
for episode in episodes:
if episode['episode_director'] not in director_dict.keys():
director_dict[episode['episode_director']] = 1
else:
director_dict[episode['episode_director']] += 1
return director_dict
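# Illustrative result with made-up episodes: three episodes directed by 'Dave Filoni' and one
# by 'Justin Ridge' yield {'Dave Filoni': 3, 'Justin Ridge': 1}.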
def create_droid(data):
"""Returns a new dictionary representation of a droid from the passed in < data >,
converting string values to the appropriate type whenever possible.
Type conversions:
height -> height_cm (str to float)
mass -> mass_kg (str to float)
equipment -> equipment (str to list)
Key order:
url
name
model
manufacturer
create_year
height_cm
mass_kg
equipment
instructions
Parameters:
data (dict): source data
Returns:
dict: new dictionary
"""
    # rebuild the dict so placeholder strings actually become None (reassigning the loop variable has no effect)
    data = {key: utl.convert_to_none(val) for key, val in data.items()}
return {
'url': data.get('url'),
'name': data.get('name'),
'model': data.get('model'),
'manufacturer': data.get('manufacturer'),
'create_year': data.get('create_year'),
'height_cm': utl.convert_to_float(data.get('height')),
'mass_kg': utl.convert_to_float(data.get('mass')),
'equipment': utl.convert_to_list(data.get('equipment'), '|'),
'instructions': data.get('instructions')
}
def create_person(data, planets=None):
"""Returns a new dictionary representation of a person from the passed in < data >,
converting string values to the appropriate type whenever possible.
Both the person's "homeworld" and "species" values are used to retrieve SWAPI dictionary
representations of the planet and specie values. Retrieving the SWAPI homeworld and
species data is delegated to the function < utl.get_resource >.
If an optional Wookieepedia-sourced < planets > list is provided, the task of retrieving
the appropriate nested dictionary (filters on the passed in homeworld planet
name) is delegated to the function < get_wookieepedia_data >.
Before the homeworld and species data is mapped (e.g. assigned) to the person's "homeworld"
and "species" keys, the functions < create_planet > and < create_species > are called
in order to provide new dictionary representations of the person's homeworld and species.
Type conversions:
height -> height_cm (str to float)
mass -> mass_kg (str to float)
homeworld -> homeworld (str to dict)
species -> species (str to dict)
Key order:
url
name
birth_year
height_cm
mass_kg
homeworld
species
force_sensitive
Parameters:
data (dict): source data
planets (list): optional supplemental planetary data
Returns:
dict: new dictionary
"""
    # normalize placeholder strings to None before building the new dict
    data = {key: utl.convert_to_none(val) for key, val in data.items()}
person_dict = {
'url': data.get('url'),
'name': data.get('name'),
'birth_year': data.get('birth_year'),
'height_cm': utl.convert_to_float(data.get('height')),
'mass_kg': utl.convert_to_float(data.get('mass')),
}
homeworld_dict = utl.get_resource(utl.SWAPI_PLANETS, {'search': data.get('homeworld')})['results'][0]
species_dict = create_species(utl.get_resource(utl.SWAPI_SPECIES, {'search': data.get('species')})['results'][0])
if planets:
planets_update = get_wookieepedia_data(planets, data['homeworld'])
homeworld_dict.update(planets_update)
person_dict['homeworld'] = create_planet(homeworld_dict)
else:
person_dict['homeworld'] = homeworld_dict
person_dict['species'] = species_dict
person_dict['force_sensitive'] = data.get('force_sensitive')
return person_dict
def create_planet(data):
"""Returns a new dictionary representation of a planet from the passed in < data >,
converting string values to the appropriate type whenever possible.
Type conversions:
suns -> suns (str->int)
moon -> moons (str->int)
orbital_period -> orbital_period_days (str to float)
diameter -> diameter_km (str to int)
gravity -> gravity_std (str to float)
climate -> climate (str to list)
terrain -> terrain (str to list)
population -> population (str->int)
Key order:
url
name
region
sector
suns
moons
orbital_period_days
diameter_km
gravity_std
climate
terrain
population
Parameters:
data (dict): source data
Returns:
dict: new dictionary
"""
    data = {key: utl.convert_to_none(val) for key, val in data.items()}
return {
'url': data.get('url'),
'name': data.get('name'),
'region': utl.convert_to_none(data.get('region')),
'sector': utl.convert_to_none(data.get('sector')),
'suns': utl.convert_to_int(data.get('suns')),
'moons': utl.convert_to_int(data.get('moons')),
'orbital_period_days': utl.convert_to_float(data.get('orbital_period')),
'diameter_km': utl.convert_to_int(data.get('diameter')),
'gravity_std': utl.convert_gravity_value(data.get('gravity')),
'climate': utl.convert_to_list(data.get('climate'), ', '),
'terrain': utl.convert_to_list(data.get('terrain'), ', '),
'population': utl.convert_to_int(data.get('population'))
}
def create_species(data):
"""Returns a new dictionary representation of a species from the passed in
< data >, converting string values to the appropriate type whenever possible.
Type conversions:
average_lifespan -> average_lifespan (str to int)
average_height -> average_height_cm (str to float)
Key order:
url
name
classification
designation
average_lifespan
average_height_cm
language
Parameters:
data (dict): source data
Returns:
dict: new dictionary
"""
    data = {key: utl.convert_to_none(val) for key, val in data.items()}
return {
'url': data.get('url'),
'name': data.get('name'),
'classification': data.get('classification'),
'designation': data.get('designation'),
'average_lifespan': utl.convert_to_int(data.get('average_lifespan')),
'average_height_cm': utl.convert_to_float(data.get('average_height')),
'language': data.get('language')
}
def create_starship(data):
"""Returns a new starship dictionary from the passed in < data >, converting string
values to the appropriate type whenever possible.
Assigning crews and passengers consitute separate
operations.
Type conversions:
length -> length_m (str to float)
max_atmosphering_speed -> max_atmosphering_speed (str to int)
hyperdrive_rating -> hyperdrive_rating (str to float)
MGLT -> MGLT (str to int)
crew -> crew_size (str to int)
passengers -> max_passengers (str to int)
armament -> armament (str to list)
cargo_capacity -> cargo_capacity_kg (str to int)
Key order:
url
name
model
starship_class
manufacturer
length_m
max_atmosphering_speed
hyperdrive_rating
top_speed_mglt
armament
crew_size
crew_members
max_passengers
passengers_on_board
cargo_capacity_kg
consumables
Parameters:
data (dict): source data
Returns:
dict: new dictionary
"""
    data = {key: utl.convert_to_none(val) for key, val in data.items()}
return {
'url': data.get('url'),
'name': data.get('name'),
'model': data.get('model'),
'starship_class': data.get('starship_class'),
'manufacturer': data.get('manufacturer'),
'length_m': utl.convert_to_float(data.get('length')),
'max_atmosphering_speed': utl.convert_to_int(data.get('max_atmosphering_speed')),
'hyperdrive_rating': utl.convert_to_float(data.get('hyperdrive_rating')),
'top_speed_mglt': utl.convert_to_int(data.get('MGLT')),
'armament': utl.convert_to_list(data.get('armament'), ','),
'crew_size': utl.convert_to_int(data.get('crew')),
'crew_members': data.get('crew_members'),
'max_passengers': utl.convert_to_int(data.get('passengers')),
'passengers_on_board': data.get('passengers_on_board'),
'cargo_capacity_kg': utl.convert_to_int(data.get('cargo_capacity')),
'consumables': data.get('consumables')
}
def get_wookieepedia_data(wookiee_data, filter):
"""Attempts to retrieve a Wookieepedia sourced dictionary representation of a
Star Wars entity (e.g., droid, person, planet, species, starship, or vehicle)
from the < wookiee_data > list using the passed in filter value. The function performs
a case-insensitive comparison of each nested dictionary's "name" value against the
passed in < filter > value. If a match is obtained the dictionary is returned to the
caller; otherwise None is returned.
Parameters:
wookiee_data (list): Wookieepedia-sourced data stored in a list of nested dictionaries
filter (str): name value used to match on a dictionary's "name" value
Returns
dict|None: Wookieepedia-sourced data dictionary if match on the filter is obtained;
otherwise returns None
"""
    for data in wookiee_data:
        if filter.lower() == data['name'].lower():
            return data
    # only report "no match" after every record has been checked
    return None
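# Matching is case-insensitive, e.g. 'dagobah' and 'DAGOBAH' retrieve the same record from a
# Wookieepedia planets list (or None when no name matches).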
def get_most_viewed_episode(episodes):
"""Identifies and returns a list of one or more episodes with the highest recorded
viewership. Ignores episodes with no viewship value. Includes in the list only those
episodes that tie for the highest recorded viewership. If no ties exist only one
episode will be returned in the list. Delegates to the function < has_viewer_data >
the task of determining if the episode includes viewership "episode_us_viewers_mm"
numeric data.
Parameters:
episodes (list): nested episode dictionaries
Returns:
list: episode(s) with the highest recorded viewership.
"""
    # track the highest viewership seen so far (episodes without viewer data are skipped)
    top_viewership = 0
    for episode in episodes:
        if has_viewer_data(episode) and episode['episode_us_viewers_mm'] > top_viewership:
            top_viewership = episode['episode_us_viewers_mm']
    # collect every episode that ties for the highest viewership
    highest_recorded_viewership = []
    for episode in episodes:
        if episode['episode_us_viewers_mm'] == top_viewership:
            highest_recorded_viewership.append(episode)
    return highest_recorded_viewership
def get_nyt_news_desks(articles):
"""Returns a list of New York Times news desks sourced from the passed in < articles >
list. Accesses the news desk name from each article's "news_desk" key-value pair. Filters
out duplicates in order to guarantee uniqueness.
Delegates to the function < utl.convert_to_none > the task of converting "news_desk"
values that equal "None" (a string) to None. Only news_desk values that are "truthy"
(i.e., not None) are returned in the list.
Parameters:
articles (list): nested dictionary representations of New York Times articles
Returns:
list: news desk strings (no duplicates)
"""
news_desks = []
for article in articles:
news_desk = utl.convert_to_none(article['news_desk'])
        if news_desk not in news_desks and news_desk is not None:
news_desks.append(news_desk)
return news_desks
def group_nyt_articles_by_news_desk(news_desks, articles):
"""Returns a dictionary of "news desk" key-value pairs that group the passed in
< articles > by their parent news desk. The passed in < news_desks > list provides
the keys while each news desk's < articles > are stored in a list and assigned to
the appropriate "news desk" key. Each key-value pair is structured as follows:
{
< news_desk_name_01 >: [{< article_01 >}, {< article_05 >}, ...],
< news_desk_name_02 >: [{< article_20 >}, {< article_31 >}, ...],
...
}
Each dictionary that represents an article is a "thinned" version of the New York Times
original and consists of the following key-value pairs ordered as follows:
Key order:
web_url
headline_main (new name)
news_desk
byline_original (new name)
document_type
material_type (new name)
abstract
word_count
pub_date
Parameters:
news_desks (list): list of news_desk names
articles (list): nested dictionary representations of New York Times articles
Returns
dict: key-value pairs that group articles by their parent news desk
"""
group_dict = {}
for news_desk in news_desks:
group = []
for article in articles:
if article['news_desk'] == news_desk:
group.append(
{
'web_url': article['web_url'],
'headline_main': article['headline']['main'],
'news_desk': article['news_desk'],
'byline_original': article['byline']['original'],
'document_type': article['document_type'],
'material_type': article['type_of_material'],
'abstract': article['abstract'],
'word_count': article['word_count'],
'pub_date': article['pub_date']
}
)
group_dict[news_desk] = group
return group_dict
def has_viewer_data(episode):
"""Checks the truth value of an episode's "episode_us_viewers_mm" key-value pair. Returns
True if the truth value is "truthy" (e.g., numeric values that are not 0, non-empty sequences
or dictionaries, boolean True); otherwise returns False if a "falsy" value is detected (e.g.,
empty sequences (including empty or blank strings), 0, 0.0, None, boolean False)).
Parameters:
episode (dict): represents an episode
Returns:
bool: True if "episode_us_viewers_mm" value is truthy; otherwise False
"""
if bool(episode.get('episode_us_viewers_mm')):
return True
else:
return False
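# Truthiness examples for has_viewer_data (made-up records):
#   has_viewer_data({'episode_us_viewers_mm': 4.92}) -> True
#   has_viewer_data({'episode_us_viewers_mm': 0.0})  -> False
#   has_viewer_data({'episode_us_viewers_mm': None}) -> False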
def main():
"""Entry point for program.
Parameters:
None
Returns:
None
"""
# 9.1 CHALLENGE 01
# TODO Refactor utl.read_csv()
clone_wars = utl.read_csv('./clone_wars.csv')
# print(f"9.1.2\nclone_wars = {clone_wars}")
clone_wars_22 = clone_wars[1:5]
# print(f"9.1.3\nclone_wars_22 = {clone_wars_22}")
clone_wars_2012 = clone_wars[4:6]
# print(f"9.1.3\nclone_wars_2012 = {clone_wars_2012}")
clone_wars_url = clone_wars[-2][-1]
# print(f"9.1.3\nclone_wars_url = {clone_wars_url}")
clone_wars_even_num_seasons = clone_wars[2::2]
# print(f"9.1.3\nclone_wars_seasons = {clone_wars_even_num_seasons}")
# 9.2 Challenge 02
# TODO Implement convert_to_none(), convert_to_int(), convert_to_float(), convert_to_list()
# print(f"\nconvert_to_none -> None = {utl.convert_to_none(' N/A ')}")
# print(f"\nconvert_to_none -> None = {utl.convert_to_none('')}")
# print(f"\nconvert_to_none -> no change = {utl.convert_to_none('Yoda ')}")
# print(f"\nconvert_to_none -> no change = {utl.convert_to_none(5.5)}")
# print(f"\nconvert_to_none -> no change = {utl.convert_to_none((1, 2, 3))}")
# print(f"\nconvert_to_int -> int = {utl.convert_to_int('506 ')}")
# print(f"\nconvert_to_int -> None = {utl.convert_to_int(' unknown')}")
# print(f"\nconvert_to_int -> no change = {utl.convert_to_int([506, 507])}")
# print(f"\nconvert_to_float -> float = {utl.convert_to_float('5.5')}")
# print(f"\nconvert_to_float -> float = {utl.convert_to_float(5)}")
# print(f"\nconvert_to_float -> None = {utl.convert_to_float(' None')}")
# print(f"\nconvert_to_float -> no change = {utl.convert_to_float((1, 2, 3))}")
# print(f"\nconvert_to_float -> no change = {utl.convert_to_float(' Yoda')}")
# print(f"\nconvert_to_list -> None = {utl.convert_to_list(' N/A')}")
# print(f"\nconvert_to_list -> list = {utl.convert_to_list('a, b, c, d', ', ')}")
# print(f"\nconvert_to_list -> list = {utl.convert_to_list('a b c d')}")
# print(f"\nconvert_to_list -> no change = {utl.convert_to_list((1, 2, 3,))}")
# 9.3 CHALLENGE 03
# TODO Refactor utl.read_csv_to_dicts()
clone_wars_episodes = utl.read_csv_to_dicts('./clone_wars_episodes.csv')
# print(f"9.3.2\nclone_wars_episodes = {clone_wars_episodes}")
# TODO Implement has_viewer_data()
# TODO Implement loop
    episode_count = 0
    for episode in clone_wars_episodes:
        if has_viewer_data(episode):
            episode_count += 1
# print(f"9.3.4\nepisode_count = {episode_count}")
# 9.4 Challenge 04
# TODO Implement convert_episode_values()
clone_wars_episodes = convert_episode_values(clone_wars_episodes)
utl.write_json('stu-clone_wars-episodes_converted.json', clone_wars_episodes)
# 9.5 Challenge 05
# TODO Implemennt get_most_viewed_episode()
most_viewed_episode = get_most_viewed_episode(clone_wars_episodes)
# print(f"9.5.2\nmost_viewed_episode = {most_viewed_episode}")
# 9.6 Challenge 06
# TODO Implement count_episodes_by_director()
director_episode_counts = count_episodes_by_director(clone_wars_episodes)
# print(f"9.6.2\ndirector_episode_counts = {director_episode_counts}")
utl.write_json('stu-clone_wars-director_episode_counts.json', director_episode_counts)
# 9.7 CHALLENGE 07
articles = utl.read_json('./nyt_star_wars_articles.json')
# TODO Implement get_nyt_news_desks()
news_desks = get_nyt_news_desks(articles)
# print(f"9.7.2\nnews_desks = {news_desks}")
utl.write_json('stu-nyt_news_desks.json', news_desks)
# 9.8 CHALLENGE 08
# TODO Implement group_nyt_articles_by_news_desk()
news_desk_articles = group_nyt_articles_by_news_desk(news_desks, articles)
utl.write_json('stu-nyt_news_desk_articles.json', news_desk_articles)
# 9.9 CHALLENGE 09
# TODO Implement calculate_articles_mean_word_count()
ignore = ('Business Day', 'Movies')
# TODO Implement loop
mean_word_counts = {}
for key, val in news_desk_articles.items():
if key not in ignore:
mean_word_counts[key] = calculate_articles_mean_word_count(val)
# print(f"9.9.2\nmean_word_counts = {mean_word_counts}")
utl.write_json('stu-nyt_news_desk_mean_word_counts.json', mean_word_counts)
# 9.10 CHALLENGE 10
# TODO Implement convert_gravity_value()
# print(f"\nconvert_gravity_value -> float = {utl.convert_gravity_value('1 standard')}")
# print(f"\nconvert_gravity_value -> None = {utl.convert_gravity_value('N/A')}")
# print(f"\nconvert_gravity_value -> float = {utl.convert_gravity_value('0.98')}")
# 9.11 CHALLENGE 11
# TODO Implement get_wookieepedia_data()
wookiee_planets = utl.read_csv_to_dicts('./wookieepedia_planets.csv')
wookiee_dagobah = get_wookieepedia_data(wookiee_planets, 'dagobah')
wookiee_haruun_kal = get_wookieepedia_data(wookiee_planets, 'HARUUN KAL')
# print(f"9.11.2\nwookiee_dagobah = {wookiee_dagobah}")
# print(f"9.11.2\nwookiee_haruun_kal = {wookiee_haruun_kal}")
utl.write_json('stu-wookiee_dagobah.json', wookiee_dagobah)
utl.write_json('stu-wookiee_haruun_kal.json', wookiee_haruun_kal)
# 9.12 CHALLENGE 12
# TODO Implement create_planet()
swapi_tatooine = utl.get_resource(utl.SWAPI_PLANETS, {'search': 'Tatooine'})['results'][0]
wookiee_tatooine = get_wookieepedia_data(wookiee_planets, 'Tatooine')
for key, val in wookiee_tatooine.items():
if val:
            swapi_tatooine[key] = val  # copy only non-empty Wookieepedia values over the SWAPI record
tatooine = create_planet(swapi_tatooine)
utl.write_json('stu-tatooine.json', tatooine)
# 9.13 CHALLENGE 13
# TODO Implement create_droid()
wookiee_droids = utl.read_json('./wookieepedia_droids.json')
swapi_r2_d2 = utl.get_resource(utl.SWAPI_PEOPLE, {'search': 'R2-D2'})['results'][0]
wookiee_r2_d2 = get_wookieepedia_data(wookiee_droids, 'R2-D2')
for key, val in wookiee_r2_d2.items():
if val:
            swapi_r2_d2[key] = val
r2_d2 = create_droid(swapi_r2_d2)
utl.write_json('stu-r2_d2.json', r2_d2)
# 9.14 Challenge 14
# TODO Implement create_species()
swapi_human_species = utl.get_resource(utl.SWAPI_SPECIES, {'search': 'Human'})['results'][0]
human_species = create_species(swapi_human_species)
utl.write_json('stu-human_species.json', human_species)
# 9.15 Challenge 15
# TODO Implement create_person()
# 9.15.2
wookiee_people = utl.read_json('./wookieepedia_people.json')
swapi_anakin = utl.get_resource(utl.SWAPI_PEOPLE, {'search': 'Anakin'})['results'][0]
wookiee_anakin = get_wookieepedia_data(wookiee_people, 'Anakin Skywalker')
for key, val in wookiee_anakin.items():
if val:
            swapi_anakin[key] = val
anakin = create_person(swapi_anakin, wookiee_planets)
utl.write_json('stu-anakin_skywalker.json', anakin)
# 9.16 CHALLENGE 16
# TODO Implement create_starship()
wookiee_starships = utl.read_csv_to_dicts('./wookieepedia_starships.csv')
wookiee_twilight = get_wookieepedia_data(wookiee_starships, 'Twilight')
twilight = create_starship(wookiee_twilight)
utl.write_json('stu-twilight.json', twilight)
# 9.17 CHALLENGE 17
# TODO Implement board_passengers()
swapi_padme = utl.get_resource(utl.SWAPI_PEOPLE, {'search': 'Padmé Amidala'})['results'][0]
# print(swapi_padme)
wookiee_padme = get_wookieepedia_data(wookiee_people, 'Padmé Amidala')
# print(wookiee_padme)
for key, val in wookiee_padme.items():
if val:
            swapi_padme[key] = val
# print(swapi_padme)
padme = create_person(swapi_padme, wookiee_planets)
swapi_c_3po = utl.get_resource(utl.SWAPI_PEOPLE, {'search': 'C-3PO'})['results'][0]
wookiee_c_3po = get_wookieepedia_data(wookiee_droids, 'C-3PO')
for key, val in wookiee_c_3po.items():
if val:
            swapi_c_3po[key] = val
c_3po = create_droid(swapi_c_3po)
# TODO Get passengers aboard the starship Twilight
twilight = board_passengers(twilight, [padme, c_3po, r2_d2])
# print(f"9.17.2\ntwilight = {twilight}")
# 9.18 CHALLENGE 18
# TODO Implement assign_crew_members()
swapi_obi_wan = utl.get_resource(utl.SWAPI_PEOPLE, {'search': 'Obi-Wan Kenobi'})['results'][0]
wookiee_obi_wan = get_wookieepedia_data(wookiee_people, 'Obi-Wan Kenobi')
for key, val in wookiee_obi_wan.items():
if val:
            swapi_obi_wan[key] = val
obi_wan = create_person(swapi_obi_wan, wookiee_planets)
# TODO Assign crew members to the starship Twilight
twilight = assign_crew_members(twilight, ['pilot', 'copilot'], [anakin, obi_wan])
# print(f"9.18.2\ntwilight = {twilight}")
# TODO Add r2_d2 instructions
r2_d2['instructions'] = ['Power up the engines']
# 10.0 ESCAPE
# TODO Add r2_d2 instruction (2nd order)
r2_d2['instructions'].append('Release the docking clamp')
# print(r2_d2)
# TODO Escape from the Malevolence (write to file)
utl.write_json('stu-twilight_departs.json', twilight)
# PERSIST CACHE (DO NOT COMMENT OUT)
utl.write_json(utl.CACHE_FILEPATH, utl.cache)
if __name__ == '__main__':
main()
| Tianhongge/Python_SI506 | last_assignment/swapi.py | swapi.py | py | 30,085 | python | en | code | 0 | github-code | 6 | 29214803056 |
from django.views.generic.simple import direct_to_template
from django.db.models import get_app, get_models, get_model
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.forms import ModelForm, Textarea, TextInput, HiddenInput
from django.forms.models import modelform_factory
def index(request,app):
if request.is_ajax():
        model = get_model(app, request.GET.get('model'))
        # collect a datepicker widget for every DateField; keeping only the last field's widget was a bug
        wdg = {}
        for field in model._meta.fields:
            if field.get_internal_type() == 'DateField':
                wdg[field.get_attname()] = TextInput(attrs={'class': 'datepicker1'})
        form = modelform_factory(model, widgets=wdg or None)
id=None
f = form(request.POST or None,request.FILES or None,instance=id and model.objects.get(id=id))
if request.method == 'POST' and f.is_valid() :
f.save()
data=[]
data = model.objects.all().values_list()
fields = [field.verbose_name for field in model._meta.fields]
html = render_to_string('table.html', {'data': data,'fields':fields,'form':form,'model':model,'modelnm':model.__name__})
return HttpResponse(html)
else:
list_models = []
for model in get_models(get_app(app)):
list_models.append([model.__name__, model._meta.verbose_name_plural])
return direct_to_template(request, 'index.html', {'models': list_models,})
| dest81/test_dm | dynamic_models/views.py | views.py | py | 1,631 | python | en | code | 1 | github-code | 6 | 20398018702 |
import wx
[wxID_NUMBERINGPANEL, wxID_NUMBERINGPANELALPHA, wxID_NUMBERINGPANELALPHA_PAD,
wxID_NUMBERINGPANELALPHA_UC, wxID_NUMBERINGPANELASC,
wxID_NUMBERINGPANELCOUNT, wxID_NUMBERINGPANELCOUNTBYDIR,
wxID_NUMBERINGPANELDESC, wxID_NUMBERINGPANELDIGIT,
wxID_NUMBERINGPANELDIGIT_AUTOPAD, wxID_NUMBERINGPANELDIGIT_PAD,
wxID_NUMBERINGPANELDIGIT_SETPAD, wxID_NUMBERINGPANELDOWNBOTTOM,
wxID_NUMBERINGPANELDOWNBUTTON, wxID_NUMBERINGPANELDOWNMORE,
wxID_NUMBERINGPANELORDER, wxID_NUMBERINGPANELPAD_CHAR,
wxID_NUMBERINGPANELPAD_WIDTH, wxID_NUMBERINGPANELRESET,
wxID_NUMBERINGPANELRESETDIR, wxID_NUMBERINGPANELROMAN,
wxID_NUMBERINGPANELROMAN_UC, wxID_NUMBERINGPANELSORTING,
wxID_NUMBERINGPANELSORT_TEXT, wxID_NUMBERINGPANELSTART,
wxID_NUMBERINGPANELSTARTBYITEMS, wxID_NUMBERINGPANELSTATICTEXT1,
wxID_NUMBERINGPANELSTATICTEXT2, wxID_NUMBERINGPANELSTATICTEXT5,
wxID_NUMBERINGPANELSTATICTEXT6, wxID_NUMBERINGPANELSTATICTEXT7,
wxID_NUMBERINGPANELSTEP, wxID_NUMBERINGPANELSTYLE,
wxID_NUMBERINGPANELUPBUTTON, wxID_NUMBERINGPANELUPMORE,
wxID_NUMBERINGPANELUPTOP, wxID_NUMBERINGPANELSTARTBYITEM
] = [wx.NewId() for _init_ctrls in range(37)]
class numberingPanel(wx.Panel):
def sizer(self):
#>> start style box:
sLine1 = wx.BoxSizer(wx.HORIZONTAL)
line1elements = [(self.digit,10),
(self.digit_pad,0),
(self.pad_char,5)]
if main.langLTR:
for i in line1elements:
sLine1.Add(i[0],0,wx.ALIGN_CENTER|wx.RIGHT,i[1])
else:
line1elements.reverse()
for i in line1elements:
sLine1.Add(i[0],0,wx.ALIGN_CENTER|wx.LEFT,i[1])
sLine1.Add((5,-1),0)
sLine3 = wx.BoxSizer(wx.HORIZONTAL)
if main.langLTR:
sLine3.Add(self.digit_setpad,0,wx.ALIGN_CENTER)
sLine3.Add(self.pad_width,0)
else:
sLine3.Add(self.pad_width,0)
sLine3.Add(self.digit_setpad,0,wx.ALIGN_CENTER)
sLine4 = wx.BoxSizer(wx.HORIZONTAL)
line4elements = [(self.alpha,10),
(self.alpha_uc,5),
(self.alpha_pad,10),]
if main.langLTR:
for i in line4elements:
sLine4.Add(i[0],0,wx.ALIGN_CENTER|wx.RIGHT,i[1])
else:
line4elements.reverse()
for i in line4elements:
sLine4.Add(i[0],0,wx.ALIGN_CENTER|wx.LEFT,i[1])
sLine4.Add((5,-1),0)
sLine5 = wx.BoxSizer(wx.HORIZONTAL)
if main.langLTR:
sLine5.Add(self.roman,0,wx.RIGHT,10)
sLine5.Add(self.roman_uc,0)
else:
sLine5.Add(self.roman_uc,0,wx.RIGHT,10)
sLine5.Add(self.roman,0,wx.RIGHT,5)
styleSizer = wx.StaticBoxSizer(self.style, wx.VERTICAL)
styleSizer.Add(sLine1,0,wx.TOP|wx.BOTTOM|main.alignment,7)
styleSizer.Add(self.digit_autopad,0,wx.LEFT|wx.RIGHT|main.alignment,20)
styleSizer.Add((1,7))
styleSizer.Add(sLine3,0,wx.LEFT|wx.RIGHT|main.alignment,20)
styleSizer.Add(sLine4,0,wx.BOTTOM|wx.TOP|main.alignment,25)
styleSizer.Add(sLine5,0,wx.BOTTOM|main.alignment,10)
#<< end style box
#>> start order box:
oLine1 = wx.BoxSizer(wx.HORIZONTAL)
if main.langLTR:
oLine1.Add(self.sort_text,0,wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT,5)
oLine1.Add(self.sorting,0,wx.ALIGN_CENTER)
else:
oLine1.Add((-1,-1),1)
oLine1.Add(self.sorting,0,wx.ALIGN_CENTER)
oLine1.Add(self.sort_text,0,wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT,5)
oLine2 = wx.BoxSizer(wx.HORIZONTAL)
oLine2elements = [((5,-1),0),
(self.staticText2,5),
(self.upButton,2),
(self.downButton,0),
((5,-1),0),
(self.upMore,2),
(self.downMore,0),
((5,-1),0),
(self.upTop,2),
(self.downBottom,0),
((5,-1),0)]
if main.langLTR:
for i in oLine2elements:
oLine2.Add(i[0],0,wx.ALIGN_CENTER|wx.RIGHT,i[1])
else:
oLine2elements.reverse()
for i in oLine2elements:
oLine2.Add(i[0],0,wx.ALIGN_CENTER|wx.LEFT,i[1])
orderSizer = self.orderSizer = wx.StaticBoxSizer(self.order, wx.VERTICAL)
orderSizer.Add((-1,3),0)
orderSizer.Add(oLine1,0,wx.BOTTOM|wx.EXPAND,10)
orderSizer.Add(oLine2,0,wx.BOTTOM,4)
#<< end order box
#>> start count box:
countDir = wx.BoxSizer(wx.HORIZONTAL)
countDir.Add(self.asc,3)
countDir.Add((-1,-1),1)
countDir.Add(self.desc,3)
countSizer = wx.FlexGridSizer(cols=2, vgap=3, hgap=5)
countElements = [[self.staticText5,
self.start],
[(-1,-1),
self.startByItems],
[(-1,5),(-1,5)],
[self.staticText6,
countDir],
[(-1,5),(-1,5)],
[self.staticText7,
self.step],
[(-1,-1),
self.countByDir],
[(-1,15),(-1,15)],
[self.staticText1,
self.reset],
[(-1,-1),
self.resetDir],
]
for row in countElements:
if not main.langLTR:
row.reverse()
for i in row:
countSizer.Add(i,0,wx.EXPAND|main.alignment)
countBoxSizer = wx.StaticBoxSizer(self.count, wx.VERTICAL)
countBoxSizer.Add(countSizer,0,wx.ALL,7)
#<< end count box
# main sizer and finish:
mainSizer = self.mainSizer = wx.BoxSizer(wx.HORIZONTAL)
leftSizer = self.leftSizer = wx.BoxSizer(wx.VERTICAL)
leftSizer.Add(styleSizer,0,wx.EXPAND)
leftSizer.Add(orderSizer,0,wx.EXPAND|wx.TOP,10)
mainElements = [((10,-1),0),
(leftSizer,7),
((25,-1),0),
(countBoxSizer,30)]
if main.langLTR:
for i in mainElements:
mainSizer.Add(i[0],0,wx.TOP,i[1])
else:
mainElements.reverse()
mainSizer.Add((-1,-1),1)
for i in mainElements:
mainSizer.Add(i[0],0,wx.TOP,i[1])
self.SetSizerAndFit(mainSizer)
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Panel.__init__(self, id=wxID_NUMBERINGPANEL, name=u'numberingPanel',
parent=prnt, pos=wx.Point(346, 305), size=wx.Size(642, 357),
style=wx.TAB_TRAVERSAL)
self.SetClientSize(wx.Size(634, 328))
self.count = wx.StaticBox(id=wxID_NUMBERINGPANELCOUNT,
label=_(u"Counter:"), name=u'count', parent=self,
pos=wx.Point(328, 16), style=main.alignment)
self.order = wx.StaticBox(id=wxID_NUMBERINGPANELORDER, label=_(u"Item Sorting:"),
name=u'order', parent=self, pos=wx.Point(16, 208),
size=wx.Size(280, 88), style=main.alignment)
self.style = wx.StaticBox(id=wxID_NUMBERINGPANELSTYLE,
label=_(u"Style:"), name=u'style', parent=self, pos=wx.Point(16,
16), style=main.alignment)
self.digit = wx.RadioButton(id=wxID_NUMBERINGPANELDIGIT,
label=_(u"Numerical:"), name=u'digit', parent=self,
pos=wx.Point(32, 48), style=wx.RB_GROUP)
self.digit.SetValue(True)
self.digit.Bind(wx.EVT_RADIOBUTTON, self.check_styles,
id=wxID_NUMBERINGPANELDIGIT)
self.alpha = wx.RadioButton(id=wxID_NUMBERINGPANELALPHA,
label=_(u"Alphabetical:"), name=u'alpha', parent=self,
pos=wx.Point(32, 112), style=0)
self.alpha.SetValue(False)
self.alpha.Enable(True)
self.alpha.SetToolTipString(_(u"Must start at positive value: (1=a, 28=ab, etc..)"))
self.alpha.Bind(wx.EVT_RADIOBUTTON, self.check_styles)
self.roman = wx.RadioButton(id=wxID_NUMBERINGPANELROMAN,
label=_(u"Roman Numeral:"), name=u'roman', parent=self,
pos=wx.Point(32, 144), style=0)
self.roman.SetValue(False)
self.roman.SetToolTipString(_(u"Count values must be between 1 and 4999"))
self.roman.Bind(wx.EVT_RADIOBUTTON, self.check_styles)
self.digit_pad = wx.CheckBox(id=wxID_NUMBERINGPANELDIGIT_PAD,
label=_(u"Pad, using:"), name=u'digit_pad', parent=self,
pos=wx.Point(112, 48), style=0)
self.digit_pad.SetValue(True)
self.digit_pad.Bind(wx.EVT_CHECKBOX, self.check_styles)
self.pad_char = wx.TextCtrl(id=wxID_NUMBERINGPANELPAD_CHAR,
name=u'pad_char', parent=self, pos=wx.Point(185, 47),
size=wx.Size(24, -1), style=0, value='0')
self.pad_char.SetMaxLength(1)
self.pad_char.Bind(wx.EVT_TEXT, main.showPreview)
self.alpha_pad = wx.CheckBox(id=wxID_NUMBERINGPANELALPHA_PAD,
label=_(u"auto pad"), name=u'alpha_pad', parent=self,
pos=wx.Point(216, 112), style=0)
self.alpha_pad.SetValue(True)
self.alpha_pad.Enable(False)
self.alpha_pad.Bind(wx.EVT_CHECKBOX, main.showPreview)
self.pad_width = wx.SpinCtrl(id=wxID_NUMBERINGPANELPAD_WIDTH, initial=3,
max=255, min=1, name=u'pad_width', parent=self, pos=wx.Point(161,
82), size=wx.Size(55, -1), style=wx.SP_ARROW_KEYS|wx.TE_PROCESS_ENTER)
self.pad_width.SetValue(3)
self.pad_width.SetRange(1, 255)
self.pad_width.Bind(wx.EVT_TEXT_ENTER, self.OnPad_widthSpinctrl)
self.pad_width.Bind(wx.EVT_SPINCTRL, self.OnPad_widthSpinctrl)
self.roman_uc = wx.CheckBox(id=wxID_NUMBERINGPANELROMAN_UC,
label=_(u"Uppercase"), name=u'roman_uc', parent=self,
pos=wx.Point(152, 144), style=0)
self.roman_uc.SetValue(True)
self.roman_uc.Enable(False)
self.roman_uc.Bind(wx.EVT_CHECKBOX, main.showPreview)
self.sort_text = wx.StaticText(id=wxID_NUMBERINGPANELSORT_TEXT,
label=_(u"Sort all items:"), name=u'sort_text', parent=self,
pos=wx.Point(24, 232), style=0)
self.staticText2 = wx.StaticText(id=wxID_NUMBERINGPANELSTATICTEXT2,
label=_(u"Manually adjust item:"), name=u'staticText2', parent=self,
pos=wx.Point(24, 264), style=0)
self.downButton = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/down.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELDOWNBUTTON,
name=u'downButton', parent=self, pos=wx.Point(152, 256), style=wx.BU_AUTODRAW)
self.downButton.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELDOWNBUTTON)
self.upButton = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/up.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELUPBUTTON,
name=u'upButton', parent=self, pos=wx.Point(128, 256), style=wx.BU_AUTODRAW)
self.upButton.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELUPBUTTON)
self.upTop = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/upAll.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELUPTOP, name=u'upTop',
parent=self, pos=wx.Point(240, 256), style=wx.BU_AUTODRAW)
self.upTop.SetToolTipString(_(u"move to top"))
self.upTop.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELUPTOP)
self.downBottom = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/downAll.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELDOWNBOTTOM,
name=u'downBottom', parent=self, pos=wx.Point(264, 256), style=wx.BU_AUTODRAW)
self.downBottom.SetToolTipString(_(u"move to bottom"))
self.downBottom.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELDOWNBOTTOM)
self.upMore = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/up5.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELUPMORE, name=u'upMore',
parent=self, pos=wx.Point(184, 256), style=wx.BU_AUTODRAW)
self.upMore.SetToolTipString(_(u"move by 5"))
self.upMore.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELUPMORE)
self.downMore = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/down5.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELDOWNMORE,
name=u'downMore', parent=self, pos=wx.Point(208, 256), style=wx.BU_AUTODRAW)
self.downMore.SetToolTipString(_(u"move by 5"))
self.downMore.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELDOWNMORE)
self.sorting = wx.Choice(choices=[ _(u"Ascending"), _(u"Descending"),
_(u"Manually")], id=wxID_NUMBERINGPANELSORTING, name=u'sorting',
parent=self, pos=wx.Point(160, 224), style=0)
self.sorting.SetSelection(0)
self.sorting.Bind(wx.EVT_CHOICE, self.setSortingOptions,
id=wxID_NUMBERINGPANELSORTING)
self.staticText5 = wx.StaticText(id=wxID_NUMBERINGPANELSTATICTEXT5,
label=_(u"Start at:"), name=u'staticText5', parent=self,
pos=wx.Point(352, 43), style=0)
self.step = wx.SpinCtrl(id=wxID_NUMBERINGPANELSTEP, initial=1,
max=10000000, min=1, name=u'step', parent=self, pos=wx.Point(416,
136), size=wx.Size(168, -1), style=wx.SP_ARROW_KEYS)
self.step.SetValue(1)
self.step.SetToolTipString(_(u"A.K.A. step size"))
self.step.Bind(wx.EVT_TEXT_ENTER, main.showPreview)
self.step.Bind(wx.EVT_SPINCTRL, main.showPreview)
self.staticText7 = wx.StaticText(id=wxID_NUMBERINGPANELSTATICTEXT7,
label=_(u"Count by:"), name=u'staticText7', parent=self,
pos=wx.Point(344, 142), style=0)
self.asc = wx.RadioButton(id=wxID_NUMBERINGPANELASC, label=_(u"+"),
name=u'asc', parent=self, pos=wx.Point(504, 104),
style=wx.RB_GROUP)
self.asc.SetFont(wx.Font(17, wx.SWISS, wx.NORMAL, wx.BOLD, False))
self.asc.SetValue(True)
self.asc.SetToolTipString(_(u"Increase counting number."))
self.asc.Bind(wx.EVT_RADIOBUTTON, main.showPreview)
self.desc = wx.RadioButton(id=wxID_NUMBERINGPANELDESC, label=_(u"-"),
name=u'desc', parent=self, pos=wx.Point(552, 104), style=0)
self.desc.SetFont(wx.Font(15, wx.SWISS, wx.NORMAL, wx.BOLD, False,
u'Impact'))
self.desc.SetValue(False)
self.desc.SetToolTipString(_(u"Decrease counting number."))
self.desc.Bind(wx.EVT_RADIOBUTTON, main.showPreview)
self.staticText6 = wx.StaticText(id=wxID_NUMBERINGPANELSTATICTEXT6,
label=_(u"Count:"), name=u'staticText6', parent=self,
pos=wx.Point(360, 104), style=0)
self.alpha_uc = wx.CheckBox(id=wxID_NUMBERINGPANELALPHA_UC,
label=_(u"Uppercase"), name=u'alpha_uc', parent=self,
pos=wx.Point(136, 112), style=0)
self.alpha_uc.SetValue(False)
self.alpha_uc.Enable(False)
self.alpha_uc.Bind(wx.EVT_CHECKBOX, main.showPreview)
self.start = wx.SpinCtrl(id=wxID_NUMBERINGPANELSTART, initial=0,
max=100000000, min=0, name=u'start', parent=self,
pos=wx.Point(416, 40), size=wx.Size(168, -1),
style=wx.SP_ARROW_KEYS)
self.start.SetValue(1)
self.start.SetToolTipString(_(u"starting number or equivalent alpha/roman character"))
self.start.Bind(wx.EVT_TEXT_ENTER, main.showPreview, id=wxID_NUMBERINGPANELSTART)
self.start.Bind(wx.EVT_SPINCTRL, main.showPreview, id=wxID_NUMBERINGPANELSTART)
self.staticText1 = wx.StaticText(id=wxID_NUMBERINGPANELSTATICTEXT1,
label=_(u"Reset every:"), name=u'staticText1', parent=self,
pos=wx.Point(344, 203), style=0)
self.reset = wx.SpinCtrl(id=wxID_NUMBERINGPANELRESET, initial=0,
max=100000000, min=0, name=u'reset', parent=self,
pos=wx.Point(416, 200), size=wx.Size(168, -1),
style=wx.SP_ARROW_KEYS)
self.reset.SetValue(0)
self.reset.SetToolTipString(_(u"0 = don't reset"))
self.reset.SetRange(0, 100000000)
self.reset.Bind(wx.EVT_TEXT_ENTER, main.showPreview,
id=wxID_NUMBERINGPANELRESET)
self.reset.Bind(wx.EVT_SPINCTRL, main.showPreview,
id=wxID_NUMBERINGPANELRESET)
self.digit_autopad = wx.RadioButton(id=wxID_NUMBERINGPANELDIGIT_AUTOPAD,
label=_(u"Auto pad"), name=u'digit_autopad', parent=self,
pos=wx.Point(56, 68), style=wx.RB_GROUP)
self.digit_autopad.SetValue(True)
self.digit_autopad.Bind(wx.EVT_RADIOBUTTON, self.check_styles)
self.digit_setpad = wx.RadioButton(id=wxID_NUMBERINGPANELDIGIT_SETPAD,
label=_(u"Fixed pad width:"), name=u'digit_setpad', parent=self,
style=0)
self.digit_setpad.SetValue(False)
self.digit_setpad.Bind(wx.EVT_RADIOBUTTON, self.check_styles)
self.resetDir = wx.CheckBox(id=wxID_NUMBERINGPANELRESETDIR,
label=_(u"Reset every directory"), name=u'resetDir', parent=self,
pos=wx.Point(456, 232), style=0)
self.resetDir.SetToolTipString(_(u"Reset count to initial value when directory changes."))
self.resetDir.SetValue(False)
self.resetDir.Bind(wx.EVT_CHECKBOX, main.showPreview)
self.countByDir = wx.CheckBox(id=wxID_NUMBERINGPANELCOUNTBYDIR,
label=_(u"Count by directory"), name=u'countByDir', parent=self,
pos=wx.Point(472, 168), style=0)
self.countByDir.SetToolTipString(_(u"Only increase/decrease count when directory changes."))
self.countByDir.SetValue(False)
self.countByDir.Bind(wx.EVT_CHECKBOX, main.showPreview)
self.startByItems = wx.CheckBox(id=wxID_NUMBERINGPANELSTARTBYITEM,
label=_(u"Start at number of items"), name=u'start_by_item',
parent=self, pos=wx.Point(440, 72), style=0)
self.startByItems.SetValue(False)
self.startByItems.SetToolTipString(_(u"Use number of items as starting point for count."))
self.startByItems.Bind(wx.EVT_CHECKBOX, self.OnStartByItemsCheckbox,
id=wxID_NUMBERINGPANELSTARTBYITEM)
def __init__(self, parent, main_window):
global main
main = main_window
self._init_ctrls(parent)
self.sizer()
self.setSortingOptions(0)
# determine style:
def GetNumberStyle(self):
#digit:
style = ''
if self.digit.GetValue():
pad = self.digit_pad.GetValue()
pad_char = self.pad_char.GetValue()
if self.digit_setpad.GetValue():
pad_width = self.pad_width.GetValue()
else:
pad_width = u"auto"
style = (u"digit", pad_char, pad_width, pad)
#alphabetical:
elif self.alpha.GetValue():
style = (u"alpha", self.alpha_uc.GetValue(), self.alpha_pad.GetValue())
#roman numeral:
elif self.roman.GetValue():
style = (u"roman", self.roman_uc.GetValue())
return style
# determine parameters:
def GetNumberParams(self):
#ascending:
if self.asc.GetValue() == True:
step_dir = +int(self.step.GetValue())
#descending:
else:
step_dir = -int(self.step.GetValue())
params = (self.start.GetValue(), step_dir, self.reset.GetValue(),
self.resetDir.GetValue(), self.countByDir.GetValue(),
self.startByItems.GetValue(),)
return params
# enables/disables item position change buttons:
def setSortingOptions(self, event):
sortButtons = (self.staticText2,self.upButton, self.downButton,
self.upMore,self.downMore,self.upTop,self.downBottom,)
if self.sorting.GetSelection() == 2:
for item in sortButtons:
item.Enable(True)
else:
for item in sortButtons:
item.Enable(False)
main.showPreview(event)
#enable/disable options based on what is selected:
def check_styles(self, event):
#digit:
digit_options = (self.digit_pad, self.pad_char, self.digit_setpad,
self.digit_autopad, self.pad_width)
pad_options = (self.digit_setpad, self.digit_autopad, self.pad_char,
self.pad_width)
if self.digit.GetValue():
self.digit_pad.Enable(True)
if self.digit_pad.GetValue():
for option in pad_options:
option.Enable(True)
else:
for option in pad_options:
option.Enable(False)
if self.reset.GetValue() == 4999:
self.reset.SetValue(0)
else:
for option in digit_options:
option.Enable(False)
#roman numeral:
if self.roman.GetValue():
self.roman_uc.Enable(True)
if self.reset.GetValue() > 4999:
self.reset.SetValue(4999)
if self.start.GetValue() == 0:
self.start.SetValue(1)
else:
self.roman_uc.Enable(False)
#alphabetical:
if self.alpha.GetValue():
self.alpha_uc.Enable(True)
self.alpha_pad.Enable(True)
if self.start.GetValue() == 0:
self.start.SetValue(1)
if self.reset.GetValue() == 4999:
self.reset.SetValue(0)
else:
self.alpha_uc.Enable(False)
self.alpha_pad.Enable(False)
main.showPreview(event)
def OnStartByItemsCheckbox(self, event):
if self.startByItems.GetValue():
self.start.Enable(False)
else:
self.start.Enable(True)
main.showPreview(event)
def OnPad_widthSpinctrl(self, event):
self.digit_setpad.SetValue(True)
main.showPreview(event)
# triggered when a button to change item position is clicked
def changeItemOrder(self, event):
buttons = {
wxID_NUMBERINGPANELUPBUTTON : -1,
wxID_NUMBERINGPANELDOWNBUTTON : 1,
wxID_NUMBERINGPANELUPMORE : -5,
wxID_NUMBERINGPANELDOWNMORE : 5,
wxID_NUMBERINGPANELUPTOP : u'top',
wxID_NUMBERINGPANELDOWNBOTTOM : u'bottom',
}
change = buttons[event.GetId()]
main.changeItemOrder(change)
###### GET/SET CONFIGURATION SETTINGS: #########################################
def getSettings(self):
settings = (u"<[numbering]>",
u"digit>:>%s" %int(self.digit.GetValue()),
u"digit_pad>:>%s" %int(self.digit_pad.GetValue()),
u"pad_char>:>%s" %self.pad_char.GetValue(),
u"digit_setpad>:>%s" %int(self.digit_setpad.GetValue()),
u"digit_autopad>:>%s" %int(self.digit_autopad.GetValue()),
u"pad_width>:>%s" %self.pad_width.GetValue(),
u"alpha>:>%s" %int(self.alpha.GetValue()),
u"alpha_uc>:>%s" %int(self.alpha_uc.GetValue()),
u"alpha_pad>:>%s" %int(self.alpha_pad.GetValue()),
u"roman>:>%s" %int(self.roman.GetValue()),
u"roman_uc>:>%s" %int(self.roman_uc.GetValue()),
u"start>:>%s" %self.start.GetValue(),
u"asc>:>%s" %int(self.asc.GetValue()),
u"desc>:>%s" %int(self.desc.GetValue()),
u"step>:>%s" %int(self.step.GetValue()),
u"reset>:>%s" %int(self.reset.GetValue()),
u"resetDir>:>%s" %int(self.resetDir.GetValue()),
u"countByDir>:>%s" %int(self.countByDir.GetValue()),
u"startByItems>:>%s" %int(self.startByItems.GetValue()),
u"sorting>:>%s" %self.sorting.GetSelection(),
)
return settings
def setSettings(self,settings):
if len(settings) == 20: #make sure number of settings is correct
try:
self.digit.SetValue(int(settings[0]))
self.digit_pad.SetValue(int(settings[1]))
self.pad_char.SetValue(settings[2])
self.digit_setpad.SetValue(int(settings[3]))
self.digit_autopad.SetValue(int(settings[4]))
self.pad_width.SetValue(int(settings[5]))
self.alpha.SetValue(int(settings[6]))
self.alpha_uc.SetValue(int(settings[7]))
self.alpha_pad.SetValue(int(settings[8]))
self.roman.SetValue(int(settings[9]))
self.roman_uc.SetValue(int(settings[10]))
self.start.SetValue(int(settings[11]))
self.asc.SetValue(int(settings[12]))
self.desc.SetValue(int(settings[13]))
self.step.SetValue(int(settings[14]))
self.reset.SetValue(int(settings[15]))
self.resetDir.SetValue(int(settings[16]))
self.countByDir.SetValue(int(settings[17]))
self.startByItems.SetValue(int(settings[18]))
self.sorting.SetSelection(int(settings[19].replace(u'\n','')))
except ValueError:
return False
else:
# apply settings:
self.check_styles(0)
self.setSortingOptions(0)
return True
else:
return False
|
metamorphose/metamorphose1
|
numbering.py
|
numbering.py
|
py
| 25,919 |
python
|
en
|
code
| 7 |
github-code
|
6
|
11192585843
|
"""
CP1404 Prac 07 - More Guitars!
Estimated Time: 30 minutes
Actual time 50 minutes
"""
from prac_06.guitar import Guitar
def main():
"""Guitars - keep track of guitars and sort them."""
guitars = []
load_file(guitars)
for guitar in guitars:
print(guitar)
guitars.sort()
for guitar in guitars:
print(guitar)
name = input("Name: ")
while name != "":
year = int(input("Year: "))
price = float(input("Price: "))
guitars.append(Guitar(name, year, price))
name = input("Name: ")
save_file(guitars)
def load_file(guitars):
"""Load a file."""
in_file = open("guitars.csv", 'r')
    for line in in_file:
        parts = line.strip().split(",")
        year = int(parts[1])  # store year as int so sorting matches user-entered guitars
        price = float(parts[2])
        guitar = Guitar(parts[0], year, price)
        guitars.append(guitar)
in_file.close()
def save_file(guitars):
"""Save a file."""
out_file = open("guitars.csv", 'w')
for guitar in guitars:
print(f"{guitar.name},{guitar.year},{guitar.cost}", file=out_file)
main()
|
alexdamrow/cp1404practicals
|
prac_07/myguitars.py
|
myguitars.py
|
py
| 1,067 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8209601687
|
import os
import pytest
import requests_mock
from newsApi.models.request import EverythingRequestModel, Language, TopHeadlinesRequestModel, SourcesRequestModel
from newsApi.models.response import EverythingResponseModel, TopHeadlinesResponseModel, SourcesResponseModel
from newsApi.service import NewsAPIService
@pytest.fixture
def news_api_service():
return NewsAPIService(os.environ.get('NEWS_API_KEY'))
def test_everything(news_api_service):
request_model = EverythingRequestModel(q="test")
with requests_mock.Mocker() as m:
m.get('https://newsapi.org/v2/everything', json={"status": "ok", "totalResults": 10, "articles": []})
response = news_api_service.everything(request_model)
assert isinstance(response, EverythingResponseModel)
assert response.status == 'ok'
assert response.totalResults == 10
def test_top_headlines(news_api_service):
request_model = TopHeadlinesRequestModel(q="test")
with requests_mock.Mocker() as m:
m.get('https://newsapi.org/v2/top-headlines', json={"status": "ok", "totalResults": 10, "articles": []})
response = news_api_service.top_headlines(request_model)
assert isinstance(response, TopHeadlinesResponseModel)
assert response.status == 'ok'
assert response.totalResults == 10
def test_sources(news_api_service):
request_model = SourcesRequestModel(language=Language.EN)
with requests_mock.Mocker() as m:
m.get('https://newsapi.org/v2/sources', json={"status": "ok", "sources": []})
response = news_api_service.sources(request_model)
assert isinstance(response, SourcesResponseModel)
assert response.status == 'ok'
|
roachseb/NewsAPI-Python-Client
|
tests/test_news_service.py
|
test_news_service.py
|
py
| 1,664 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22250933699
|
n = 0
s = '9' * 65
while '999' in s or '222' in s:
if '222' in s:
s = s.replace('222', '9', 1)
else:
s = s.replace('999', '2', 1)
for i in s:
num = int(i)
n += num
print(n)
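# --- hedged illustration (added; not part of the original script) ---
# The same rewrite rules traced on a shorter string: '999999' -> '2999' -> '22',
# so the digit sum would be 4. The helper below is only a sketch of one run.
def _trace_rewrite(s='999999'):
    while '999' in s or '222' in s:
        # same priority as above: collapse '222' first, otherwise '999'
        s = s.replace('222', '9', 1) if '222' in s else s.replace('999', '2', 1)
        print(s)
    return sum(int(c) for c in s)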
|
MakinFantasy/xo
|
12/16.06/4.py
|
4.py
|
py
| 205 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70374265789
|
from owlready2 import *
import csv
def loadOntology(path):
onto = get_ontology(path).load()
print("Loaded " + onto.base_iri)
return onto
def get_parents(ontology, cls):
# print(cls.name)
return cls.ancestors(include_self=False)
if __name__ == '__main__':
path_to_ontology = "file:///Users/danielle.welter/Ontologies/COVID_ontology/COVID-merged.owl"
ontology = loadOntology(path_to_ontology)
all_classes = list(ontology.classes())
# print(len(all_classes))
path_to_file = "/Users/danielle.welter/Documents/TaggerDictionaries/full_dictionary/covid_dictionary/covid_entities.tsv"
path_to_entities = "/Users/danielle.welter/Documents/TaggerDictionaries/full_dictionary/full_entities.tsv"
path_to_groups = "/Users/danielle.welter/Documents/TaggerDictionaries/full_dictionary/full_groups.tsv"
covid_ids = {}
tagger_ids = {}
tagger_groups = {}
with open(path_to_file) as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t')
for row in reader:
covid_ids[row[2]] = row[0]
with open(path_to_entities) as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t')
for row in reader:
tagger_ids[row[2]] = row[0]
with open(path_to_groups) as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t')
for row in reader:
if row[0] not in tagger_groups.keys():
tagger_groups[row[0]] = []
tagger_groups[row[0]].append(row[1])
with open('../output/new_covid_groups.tsv', 'w') as out_file:
tsv_writer = csv.writer(out_file, delimiter='\t')
with open('../output/existing_covid_groups.tsv', 'w') as extra_file:
extra_writer = csv.writer(extra_file, delimiter='\t')
with open('../output/other_covid_classes.tsv', 'w') as second_out_file:
second_writer = csv.writer(second_out_file, delimiter='\t')
for cls in all_classes:
if 'APOLLO_SV' in cls.name:
n = cls.name.replace('APOLLO_SV_', 'APOLLO_SV:')
elif 'NCBITaxon' in cls.name:
n = cls.name.replace('NCBITaxon_', '')
else:
n = cls.name.replace('_', ':')
if n in covid_ids.keys():
parents = get_parents(ontology, cls)
for par in parents:
if 'APOLLO_SV' in par.name:
p = par.name.replace('APOLLO_SV_', 'APOLLO_SV:')
elif 'NCBITaxon' in par.name:
p = par.name.replace('NCBITaxon_', '')
else:
p = par.name.replace('_', ':')
if p in covid_ids.keys():
                                if covid_ids[n] not in tagger_groups or covid_ids[p] not in tagger_groups[covid_ids[n]]:
tsv_writer.writerow([covid_ids[n], covid_ids[p]])
else:
extra_writer.writerow([covid_ids[n], p])
elif p in tagger_ids.keys():
                                if covid_ids[n] not in tagger_groups or tagger_ids[p] not in tagger_groups[covid_ids[n]]:
tsv_writer.writerow([covid_ids[n], tagger_ids[p]])
else:
extra_writer.writerow([covid_ids[n], p])
else:
second_writer.writerow([n, covid_ids[n], p])
else:
second_writer.writerow([n,'n/a','n/a'])
|
daniwelter/python_owl_sandbox
|
ontology_extractor/covid_parent_accessor.py
|
covid_parent_accessor.py
|
py
| 3,745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30347048151
|
#from exam_word import word_04
from .exam_func import TEST, MEAN,EXAMPLE, EXAMPLE_test, SIMILAR
import json
from collections import OrderedDict
from .W2V_word import W2V
import random
# word = word_04
def REMOVE(st):
row = ' '.join(s for s in st)
remove = "}"
for x in range(len(remove)):
row1 = row.replace(remove[x],"")
row2 = row1.replace("'","")
row3 = row2.split('.')
strip_li = []
for i in row3:
i = i.strip()
if i:
strip_li.append(i)
return strip_li
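# --- hedged illustration (added; not part of the original module) ---
# REMOVE joins the strings, strips '}' and single quotes, splits on '.',
# and keeps the non-empty, whitespace-trimmed pieces.
def _example_remove():
    return REMOVE(["{'a. b'}", "c."])  # -> ['{a', 'b c']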
def W2V_MEAN(w2v_word):
n = len(w2v_word)
w2v_mean = []
for i in range(0,n):
t = TEST(w2v_word[i])
result_parse = t[0]
row1 = MEAN(result_parse)
row2 = REMOVE(row1)
w2v_mean.append(row2)
return w2v_mean
def t04(word):
w2v_word = W2V(word)
t = TEST(word)
result_parse = t[0]
result_similar = t[2]
result_similar = random.choice(result_similar)
mean = result_similar['예시']
similar = result_similar['유의어']
similar_list = []
similar_list.append(similar)
w2v_mean = W2V_MEAN(w2v_word) + W2V_MEAN(similar_list)
w2v_word = w2v_word + similar_list
choice = { name:value for name, value in zip(w2v_word, w2v_mean) }
file_exam4 = OrderedDict()
file_exam4["TYPE4"] = "다음 문장 속 "+word+"의 의미와 가장 관련이 깊은 단어를 고르시오."
    #file_exam4["WORD"] = word  # word
    file_exam4["ANSWER"] = similar  # synonym
    file_exam4["MEAN"] = mean  # meaning
file_exam4["CHOICE"] = choice
EXAM4 = json.dumps(file_exam4, ensure_ascii=False, indent="\t")
print(EXAM4)
return EXAM4
|
GeulNoon/server
|
geulnoon/Word/test04.py
|
test04.py
|
py
| 1,699 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2103471277
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
sns.set_context(rc={'figure.figsize': (9, 9)}, font_scale=2.)
def load_embeddings(filename):
"""
Load a DataFrame from the generalized text format used by word2vec, GloVe,
fastText, and ConceptNet Numberbatch. The main point where they differ is
whether there is an initial line with the dimensions of the matrix.
"""
labels = []
rows = []
with open(filename, encoding='utf-8') as infile:
for i, line in enumerate(infile):
items = line.rstrip().split(' ')
if len(items) == 2:
# This is a header row giving the shape of the matrix
continue
labels.append(items[0])
values = np.array([float(x) for x in items[1:]], 'f')
rows.append(values)
arr = np.vstack(rows)
return pd.DataFrame(arr, index=labels, dtype='f')
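# --- hedged illustration (added for clarity; the file name below is made up) ---
# load_embeddings expects the generic word2vec/GloVe text layout: an optional
# "<rows> <cols>" header line (skipped above), then one token per line followed
# by its space-separated vector values.
def _example_load_embeddings():
    with open("sample_embeddings.txt", "w", encoding="utf-8") as f:
        f.write("2 3\ncat 0.1 0.2 0.3\ndog 0.4 0.5 0.6\n")
    frame = load_embeddings("sample_embeddings.txt")
    print(frame.loc["cat"].values)  # -> [0.1 0.2 0.3]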
def load_lexicon(filename):
"""
Load a file from Bing Liu's sentiment lexicon
(https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html), containing
English words in Latin-1 encoding.
One file contains a list of positive words, and the other contains
a list of negative words. The files contain comment lines starting
with ';' and blank lines, which should be skipped.
"""
lexicon = []
with open(filename, encoding='latin-1') as infile:
for line in infile:
line = line.rstrip()
if line and not line.startswith(';'):
lexicon.append(line)
return lexicon
def load_data(data_path, embeddings_path, state=0):
pos_words = load_lexicon(data_path + '/positive-words.txt')
neg_words = load_lexicon(data_path + '/negative-words.txt')
embeddings = load_embeddings(embeddings_path)
pos_vectors = embeddings.loc[pos_words].dropna()
neg_vectors = embeddings.loc[neg_words].dropna()
vectors = pd.concat([pos_vectors, neg_vectors])
targets = np.array([1 for entry in pos_vectors.index] + [-1 for entry in neg_vectors.index])
labels = list(pos_vectors.index) + list(neg_vectors.index)
train_vectors, test_vectors, train_targets, test_targets, train_vocab, test_vocab = \
train_test_split(vectors, targets, labels, test_size=0.1, random_state=state)
## Data
X_train = train_vectors.values
X_test = test_vectors.values
# Encoding y
one_hot = OneHotEncoder(sparse=False, categories='auto')
one_hot.fit(np.array(train_targets).reshape(-1,1))
y_train = one_hot.transform(np.array(train_targets).reshape(-1,1))
y_test = one_hot.transform(np.array(test_targets).reshape(-1,1))
return embeddings, X_train, X_test, y_train, y_test, train_vocab, test_vocab
def load_test_names(embeddings):
NAMES_BY_ETHNICITY = {
# The first two lists are from the Caliskan et al. appendix describing the
# Word Embedding Association Test.
'White': [
'Adam', 'Chip', 'Harry', 'Josh', 'Roger', 'Alan', 'Frank', 'Ian', 'Justin',
'Ryan', 'Andrew', 'Fred', 'Jack', 'Matthew', 'Stephen', 'Brad', 'Greg', 'Jed',
'Paul', 'Todd', 'Brandon', 'Hank', 'Jonathan', 'Peter', 'Wilbur', 'Amanda',
'Courtney', 'Heather', 'Melanie', 'Sara', 'Amber', 'Crystal', 'Katie',
'Meredith', 'Shannon', 'Betsy', 'Donna', 'Kristin', 'Nancy', 'Stephanie',
'Bobbie-Sue', 'Ellen', 'Lauren', 'Peggy', 'Sue-Ellen', 'Colleen', 'Emily',
'Megan', 'Rachel', 'Wendy'
],
'Black': [
'Alonzo', 'Jamel', 'Lerone', 'Percell', 'Theo', 'Alphonse', 'Jerome',
'Leroy', 'Rasaan', 'Torrance', 'Darnell', 'Lamar', 'Lionel', 'Rashaun',
'Tyree', 'Deion', 'Lamont', 'Malik', 'Terrence', 'Tyrone', 'Everol',
'Lavon', 'Marcellus', 'Terryl', 'Wardell', 'Aiesha', 'Lashelle', 'Nichelle',
'Shereen', 'Temeka', 'Ebony', 'Latisha', 'Shaniqua', 'Tameisha', 'Teretha',
'Jasmine', 'Latonya', 'Shanise', 'Tanisha', 'Tia', 'Lakisha', 'Latoya',
'Sharise', 'Tashika', 'Yolanda', 'Lashandra', 'Malika', 'Shavonn',
'Tawanda', 'Yvette'
]
}
NAMES_BY_ETHNICITY['White'] = [n.lower() for n in NAMES_BY_ETHNICITY['White'] if n.lower() in embeddings.index]
NAMES_BY_ETHNICITY['Black'] = [n.lower() for n in NAMES_BY_ETHNICITY['Black'] if n.lower() in embeddings.index]
white_female_start = NAMES_BY_ETHNICITY['White'].index('amanda')
black_female_start = NAMES_BY_ETHNICITY['Black'].index('aiesha')
test_gender = white_female_start*['Male'] + (len(NAMES_BY_ETHNICITY['White']) - white_female_start)*['Female']
test_gender += black_female_start*['Male'] + (len(NAMES_BY_ETHNICITY['Black']) - black_female_start)*['Female']
test_df = pd.DataFrame({'name':NAMES_BY_ETHNICITY['White'] + NAMES_BY_ETHNICITY['Black'],
'race':len(NAMES_BY_ETHNICITY['White'])*['White'] + len(NAMES_BY_ETHNICITY['Black'])*['Black'],
'gender':test_gender})
test_names_embed = embeddings.loc[test_df['name']].values
return test_df, test_names_embed
def load_nyc_names(names_path, embeddings):
names_df = pd.read_csv(names_path)
ethnicity_fixed = []
for n in names_df['Ethnicity']:
if n.startswith('BLACK'):
ethnicity_fixed.append('Black')
if n.startswith('WHITE'):
ethnicity_fixed.append('White')
if n.startswith('ASIAN'):
ethnicity_fixed.append('Asian')
if n.startswith('HISPANIC'):
ethnicity_fixed.append('Hispanic')
names_df['Ethnicity'] = ethnicity_fixed
names_df = names_df[np.logical_or(names_df['Ethnicity']=='Black', names_df['Ethnicity']=='White')]
names_df['Child\'s First Name'] = [n.lower() for n in names_df['Child\'s First Name']]
names_from_df = names_df['Child\'s First Name'].values.tolist()
idx_keep = []
for i, n in enumerate(names_from_df):
if n in embeddings.index:
idx_keep.append(i)
names_df = names_df.iloc[idx_keep]
names_from_df = names_df['Child\'s First Name'].values.tolist()
names_embed = embeddings.loc[names_from_df].values
return names_embed
def print_summary(test_df, method_name, test_accuracy):
print(method_name + ' test accuracy %f' % test_accuracy)
mean_sentiments_race = []
for r in ['Black', 'White']:
mean_sent = test_df[method_name + '_logits'][test_df['race']==r].mean()
mean_sentiments_race.append(mean_sent)
print(method_name + ' %s mean sentiment is %f' %(r, mean_sent))
print(method_name + ' race mean sentiment difference is %f\n' % np.abs(mean_sentiments_race[0] - mean_sentiments_race[1]))
mean_sentiments_gender = []
for g in ['Female', 'Male']:
mean_sent = test_df[method_name + '_logits'][test_df['gender']==g].mean()
mean_sentiments_gender.append(mean_sent)
print(method_name + ' %s mean sentiment is %f' %(g, mean_sent))
print(method_name + ' gender mean sentiment difference is %f\n' % np.abs(mean_sentiments_gender[0] - mean_sentiments_gender[1]))
sns.boxplot(x='race', y=method_name + '_logits', data=test_df).set_title(method_name, fontsize=30)
plt.ylim(-4.5, 7.)
plt.xlabel('')
plt.ylabel('Logits', size=20, labelpad=-5)
plt.xticks(fontsize=20)
plt.yticks(fontsize=14)
plt.show()
return
|
IBM/sensitive-subspace-robustness
|
utils.py
|
utils.py
|
py
| 7,600 |
python
|
en
|
code
| 13 |
github-code
|
6
|
27277101793
|
from flask import Flask, redirect, render_template, request, url_for, session, flash
import sqlite3
import random
import datetime
import smtplib
from email.mime.text import MIMEText
# sqlite3 connection
conn = sqlite3.connect('mydatabase.db')
cursor = conn.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY,
first_name TEXT,
middle_name TEXT,
last_name TEXT,
date_of_birth DATE,
email TEXT UNIQUE,
gender TEXT,
user_id TEXT UNIQUE
)
''')
app = Flask(__name__)
# Role selection
@app.route("/", methods=["POST", "GET"])
def select_role():
if request.method == "POST":
role = request.form.get("role") # Check which role was selected
if role == "admin":
return redirect("/admin-login?role=admin") # Pass role=admin as a query parameter
elif role == "agent":
return redirect("/register-citizen")
return render_template("select_role.html")
# Admin login
@app.route("/admin-login", methods=["POST", "GET"])
def admin_login():
admin_password = "admin123"
# Check if the role query parameter is present and set to "admin"
role = request.args.get("role")
if role != "admin":
return redirect("/") # Redirect to the role selection page if the role is not "admin"
if request.method == "POST":
entered_password = request.form.get("admin_password")
if entered_password == admin_password:
return redirect("/view-citizens")
else:
# Password is incorrect, show an error message
error_message = "Incorrect password. Please try again."
return render_template("admin_login.html", error_message=error_message)
return render_template("admin_login.html")
@app.route("/register-citizen", methods=[ "POST", "GET" ])
def register_citizen():
if request.method == "POST":
conn = sqlite3.connect('mydatabase.db')
cursor = conn.cursor()
first_name = request.form["first_name"]
middle_name = request.form["middle_name"]
last_name = request.form["last_name"]
email = request.form["email"]
date_of_birth = request.form["date_of_birth"]
gender = request.form["gender"]
cursor.execute('SELECT id FROM users WHERE email = ?', (email,))
existing_user = cursor.fetchone()
# Check if the email already exists
if not existing_user:
# conn.close()
user_id = generate_citizen_id()
cursor.execute('INSERT INTO users (first_name, middle_name, last_name, email, date_of_birth, gender, user_id) VALUES (?, ?, ?, ?, ?, ?, ?)',
(first_name, middle_name, last_name, email, date_of_birth, gender, user_id))
conn.commit()
# conn.close()
send_code_to_email(first_name, email, user_id)
        return render_template("index.html")
    # GET request: render the same page so the registration form is shown
    return render_template("index.html")
# view citizen
@app.route("/view-citizen/<int:user_id>")
def view_citizen(user_id):
try:
conn = sqlite3.connect('mydatabase.db')
cursor = conn.cursor()
# Execute an SQL query to select the user with the specified user_id
cursor.execute("SELECT id, first_name, middle_name, last_name, email, date_of_birth, gender, user_id FROM users WHERE id = ?", (user_id,))
user = cursor.fetchone()
conn.close()
return render_template("view_citizen.html", user=user)
except Exception as e:
return f"An error occurred: {str(e)}"
# update citizen
@app.route("/update-citizen/<int:user_id>", methods=["GET", "POST"])
def update_user(user_id):
if request.method == "POST":
# Get the updated information from the form
first_name = request.form["first_name"]
middle_name = request.form["middle_name"]
last_name = request.form["last_name"]
email = request.form["email"]
# Connect to the database and execute an SQL query to update the user's information
conn = sqlite3.connect('mydatabase.db')
cursor = conn.cursor()
cursor.execute("UPDATE users SET first_name=?, middle_name=?, last_name=?, email=? WHERE id=?", (first_name, middle_name, last_name, email, user_id))
conn.commit()
conn.close()
return redirect("/view-citizens")
else:
# Display the update form with the current user's information
conn = sqlite3.connect('mydatabase.db')
cursor = conn.cursor()
cursor.execute("SELECT id, first_name, middle_name, last_name, email FROM users WHERE id=?", (user_id,))
user = cursor.fetchone()
conn.close()
return render_template("update_citizen.html", user_id=user_id, user=user)
# delete citizen
@app.route("/delete-citizen/<int:user_id>")
def delete_citizen(user_id):
try:
# Connect to the database and execute an SQL query to delete the user
conn = sqlite3.connect('mydatabase.db')
cursor = conn.cursor()
cursor.execute("DELETE FROM users WHERE id=?", (user_id,))
conn.commit()
conn.close()
return redirect("/view-citizens")
except Exception as e:
return f"An error occurred: {str(e)}"
# View all citizens
@app.route("/view-citizens")
def view_citizens():
try:
conn = sqlite3.connect('mydatabase.db')
cursor = conn.cursor()
# Execute an SQL query to select all users
cursor.execute("SELECT id, first_name, middle_name, last_name, email, date_of_birth, gender, user_id FROM users")
# Fetch all user data
users = cursor.fetchall()
conn.close()
# Render the HTML template and pass the user data
return render_template("view_citizens.html", users=users)
except Exception as e:
return f"An error occurred: {str(e)}"
# erin's function to send code to email
def send_code_to_email(first_name, email, user_id):
# Email configuration
SMTP_SERVER = "smtp.gmail.com"
SMTP_PORT = 587
SMTP_USERNAME = "[email protected]"
SMTP_PASSWORD = "nbxb qojo fyqm ewhn"
msg = MIMEText(f"Hello {first_name}\n\tThank you for processing your application.\n\tYour ID number is: {user_id}.\n\tPLEASE DO NOT SHARE THIS WITH ANYONE!!!!!")
msg["Subject"] = "Your Generated Code"
msg["From"] = SMTP_USERNAME
msg["To"] = email
try:
# Connect to the SMTP server
server = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
server.starttls()
server.login(SMTP_USERNAME, SMTP_PASSWORD)
# Send the email
server.sendmail(SMTP_USERNAME, [email], msg.as_string())
# Disconnect from the server
server.quit()
print(f"Code sent to {email}")
except smtplib.SMTPException as e:
print("SMTP error:", e)
def generate_citizen_id():
citizen_id = ''.join(str(random.randint(0, 9)) for _ in range(9))
return citizen_id
if __name__ == '__main__':
app.run(debug=True)
|
Jordan1570/ID-proj
|
app.py
|
app.py
|
py
| 7,120 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32717559608
|
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
addr = ('localhost', 9000)
sock.connect(addr)
msg = sock.recv(1024)
print(msg.decode())
sock.send("Jeonghyun Song".encode())
msg_stu = sock.recv(1024)
student_number = int.from_bytes(msg_stu, 'big')
print(student_number)
sock.close()
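# --- hedged sketch (added; not part of this repo) ---
# A minimal server counterpart matching the protocol above: greet the client,
# read its name, then send a student number as big-endian bytes. The greeting
# text, the student number, and the 4-byte width are assumptions.
def _example_server(student_number=20231234):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(('localhost', 9000))
    srv.listen(1)
    conn, _ = srv.accept()
    conn.send("What is your name?".encode())
    print(conn.recv(1024).decode())
    conn.send(student_number.to_bytes(4, 'big'))
    conn.close()
    srv.close()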
|
Jeong613/net_programming
|
HW2/first_client.py
|
first_client.py
|
py
| 305 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12066160544
|
import enum
from typing import Dict, List, Tuple
# --------------------------
# Basic (string, float, int)
# --------------------------
name: str = "Tay May"
weight: float = 60.2
age: int = 16
print(name)
print(weight)
print(age)
# --------------------------
# List
# --------------------------
thanhvien_cs102: List[str] = ["Tay May", "To Mo", "Robot"]
# list values can be changed
thanhvien_cs102[1] = "Hello"
print(thanhvien_cs102)
# --------------------------
# Tuple
# --------------------------
mytuple: Tuple[str, str, int] = ("Pygame", "with", 102)
# tuple values cannot be changed
# mytuple[2] = 103
print(mytuple)
# --------------------------
# Dictionary
# --------------------------
card: Dict[str, str] = {"course": "CS102", "main": "Pygame"}
print(card["main"])
# --------------------------
# enum
# --------------------------
class GameStateType(enum.Enum):
RUNNING = 0
WON = 1
LOST = 2
state: GameStateType = GameStateType.RUNNING
print(state == GameStateType.RUNNING)
print(state == GameStateType.WON)
# --------------------------
# Function
# --------------------------
def sum(items: List[float]) -> float:
total: float = 0.0
for item in items:
total += item
return total
danh_sach_diem: List[float] = [2.5, 1.5, 2, 3.25]
tong: float = sum(danh_sach_diem)
print(tong)
|
Greninja2021/Steam2022
|
Lesson_1_CS102-CrazyRobot/example_typing.py
|
example_typing.py
|
py
| 1,344 |
python
|
en
|
code
| 2 |
github-code
|
6
|
40327669141
|
from easy_pyechart import _funnel_base_config, constants
from pyecharts import options as opts
from pyecharts.commons.utils import JsCode
from typing import Any, Optional
from pyecharts.charts import Funnel
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
'''Funnel chart'''
class eFunnel():
def __init__(
self,
title: Optional[str] = None,
subTitle: Optional[str] = None,
lableList: Optional[list] = [],
valueList: Optional[list] = [],
themeType=constants.defualt_theme,
backgroundImageUrl: Optional[str] = None):
self.opts: dict = {
"lengend": Funnel,
"xList": lableList,
"yList": valueList,
"themeType": themeType,
"backgroundImageUrl": backgroundImageUrl,
"title": title,
"subTitle": subTitle,
}
    '''Triangle-shaped funnel chart configuration'''
def _funnel_chart(self):
return _funnel_base_config(self)
|
jayz2017/easy_pyechart.py
|
easy_pyechart/easy_funnel.py
|
easy_funnel.py
|
py
| 1,050 |
python
|
en
|
code
| 1 |
github-code
|
6
|
11773636601
|
import requests
import json
from pprint import pprint
access_token='<put your access token here>'
page_id='<put your page id here>'
url='https://graph.facebook.com/v2.0/'+page_id+'?feed&access_token='+access_token
r = requests.get(url)
try:
response_json = json.loads(r.text)
except (ValueError, KeyError, TypeError):
print("JSON error")
pprint(response_json)
|
mehta-a/FacebookDataExtraction
|
src/extract.py
|
extract.py
|
py
| 367 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39468004248
|
__doc__ = "this module contains various tools"
from datetime import date, datetime
# built in modules:
# import sys
# import os
# modules from pypi (install using `pip install module_name`)
# paramiko
# requests
def input_int(num_range: tuple):
"""
`range`: tuple like (from, to)
"""
frm, to = num_range
option = input(f'enter number between {frm} and {to} ')
while (not option.isdigit()) or (int(option) < frm or int(option) > to):
print("Error: invalid option")
option = input()
return int(option)
def cool():
print("la la la ")
def print_this_time():
"""should be used only when this modle is the main module"""
print(datetime.now())
if __name__ == '__main__':
print_this_time()
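# --- hedged usage sketch (added; never called by this module) ---
# input_int keeps prompting until the reply is a digit inside the inclusive range.
def _example_menu():
    print('1) start  2) settings  3) quit')
    return input_int((1, 3))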
|
MrPupik/python-examples
|
zero_to_hero/tools.py
|
tools.py
|
py
| 753 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26529063971
|
from copy import deepcopy
from flask import abort
from flask import Blueprint
from flask import request
from flask_api import status
from oneview_redfish_toolkit.api.event import Event
from oneview_redfish_toolkit.api.event_service import EventService
from oneview_redfish_toolkit.blueprints.util.response_builder import \
ResponseBuilder
from oneview_redfish_toolkit import config
from oneview_redfish_toolkit import util
event_service = Blueprint("event_service", __name__)
ONEVIEW_TEST_ALERT = {
"timestamp": "2018-02-12T20:12:03.231Z",
"resource": {
"category": "alerts",
"associatedResource": {
"resourceName": "0000A66101, bay 3",
"resourceUri": "/rest/server-hardware/"
"30373737-3237-4D32-3230-313530314752"
}
}
}
ONEVIEW_TEST_TASK = {
"timestamp": "2018-02-12T20:12:03.231Z",
"resourceUri": "/rest/server-hardware/"
"30373737-3237-4D32-3230-313530314752",
"changeType": None,
"resource": {
"category": "server-hardware",
"name": "0000A66101, bay 3"
}
}
REDFISH_TO_ONEVIEW_EVENTS = {
"ResourceAdded": "Created",
"ResourceUpdated": "Updated",
"ResourceRemoved": "Deleted"
}
@event_service.route("/redfish/v1/EventService/", methods=["GET"])
def get_event_service():
"""Get the Redfish Event Service.
Get method to return EventService JSON when
/redfish/v1/EventService is requested.
Returns:
JSON: JSON with EventService.
"""
evs = EventService(util.get_delivery_retry_attempts(),
util.get_delivery_retry_interval())
return ResponseBuilder.success(evs)
@event_service.route(
"/redfish/v1/EventService/Actions/EventService.SubmitTestEvent/",
methods=["POST"])
def execute_test_event_action():
"""Executes the SubmitTestEvent Action
Return a JSON containing the EventType received.
Logs exception of any error and return abort.
Returns:
JSON: JSON containing the EventType.
Exceptions:
Exception: Missing EventType property.
Return Bad Request status(400)
"""
if not config.auth_mode_is_conf():
abort(status.HTTP_404_NOT_FOUND,
"EventService is not enabled.")
event_type = None
try:
event_type = request.get_json()['EventType']
except Exception:
abort(status.HTTP_400_BAD_REQUEST,
'Invalid JSON data. Missing EventType property.')
if event_type not in util.get_subscriptions_by_type().keys():
abort(status.HTTP_400_BAD_REQUEST,
'Invalid EventType value: %s' % event_type)
# Creates a sample OneView SCMB message according to
# the value of 'event_type'
if event_type == "Alert":
message = deepcopy(ONEVIEW_TEST_ALERT)
else:
message = deepcopy(ONEVIEW_TEST_TASK)
message['changeType'] = REDFISH_TO_ONEVIEW_EVENTS[event_type]
event = Event(message)
util.dispatch_event(event)
return ResponseBuilder.response(event, status.HTTP_202_ACCEPTED)
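# --- hedged client sketch (added; host, port and scheme are assumptions) ---
# The SubmitTestEvent action above expects a JSON body carrying an EventType key.
def _example_submit_test_event():
    import requests  # hypothetical client call for illustration only
    return requests.post(
        "http://localhost:5000/redfish/v1/EventService/Actions/EventService.SubmitTestEvent/",
        json={"EventType": "ResourceAdded"})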
|
HewlettPackard/oneview-redfish-toolkit
|
oneview_redfish_toolkit/blueprints/event_service.py
|
event_service.py
|
py
| 3,130 |
python
|
en
|
code
| 16 |
github-code
|
6
|
70101583869
|
from SlackClient import SlackClient
class SlackCalls(SlackClient):
class SlackCallUser(SlackClient):
def __init__(self):
self.slack_id = None
self.external_id = None
self.display_name = None
self.avatar_url = None
def generate_json_user(self):
body = {}
if self.slack_id != None:
body['slack_id'] = self.slack_id
if self.external_id != None:
body['external_id'] = self.external_id
if self.avatar_url != None:
body['avatar_url'] = self.avatar_url
return body
def __init__(self):
self.external_unique_id = None
self.join_url = None
self.created_by = None
self.date_start = None
self.desktop_app_join_url = None
self.external_display_id = None
self.title = None
self.users = None
self.id = None
self.duration = None
self.users = []
def generate_queries(self):
body = {}
if len(self.users) > 0:
body['users'] = self.users
if self.id != None:
body['id'] = self.id
if self.duration != None:
body['duration'] = self.duration
if self.external_unique_id != None:
body['external_unique_id'] = self.external_unique_id
if self.join_url != None:
body['join_url'] = self.join_url
if self.created_by != None:
body['created_by'] = self.created_by
if self.date_start != None:
body['date_start'] = self.date_start
if self.desktop_app_join_url != None:
body['desktop_app_join_url'] = self.desktop_app_join_url
if self.external_display_id != None:
body['external_display_id'] = self.external_display_id
if self.title != None:
body['title'] = self.title
if self.users != None:
            body['users'] = self.users
        return body
def clear_queries(self):
self.external_unique_id = None
self.join_url = None
self.created_by = None
self.date_start = None
self.desktop_app_join_url = None
self.external_display_id = None
self.title = None
self.users = None
self.id = None
self.duration = None
|
cthacker-udel/Python-Slack-API
|
SlackCalls.py
|
SlackCalls.py
|
py
| 2,336 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10247855105
|
#-*- coding: utf-8 -*-
from itertools import chain
from os.path import dirname, splitext
from sys import platform
from typing import Dict, List, Set, Union
from backend.converters import FileConverter, rar_executables
from backend.db import get_db
from backend.files import scan_files
from backend.volumes import Volume
conversion_methods: Dict[str, Dict[str, FileConverter]] = {}
"source_format -> target_format -> conversion class"
for fc in FileConverter.__subclasses__():
conversion_methods.setdefault(fc.source_format, {})[fc.target_format] = fc
def get_available_formats() -> Set[str]:
"""Get all available formats that can be converted to.
Returns:
Set[str]: The list with all formats
"""
return set(chain.from_iterable(conversion_methods.values()))
def find_target_format_file(
file: str,
formats: List[str]
) -> Union[FileConverter, None]:
"""Get a FileConverter class based on source format and desired formats.
Args:
file (str): The file to get the converter for.
formats (List[str]): The formats to convert to, in order of preference.
Returns:
Union[FileConverter, None]: The converter class that is possible
and most prefered.
In case of no possible conversion, `None` is returned.
"""
source_format = splitext(file)[1].lstrip('.').lower()
if not source_format in conversion_methods:
return
if (
source_format in ('rar', 'cbr')
and not platform in rar_executables
):
return
available_formats = conversion_methods[source_format]
for format in formats:
if source_format == format:
break
if format in available_formats:
return available_formats[format]
return
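# --- hedged illustration (added; the file name is made up) ---
# The first format in the preference list that the source can be converted to
# wins; a file already in a preferred format is left alone, and rar/cbr sources
# are skipped on platforms without a rar executable.
def _example_find_target_format():
    converter = find_target_format_file('Issue 001.cbr', ['cbz', 'folder'])
    if converter is not None:
        print(converter.source_format, '->', converter.target_format)
    else:
        print('no conversion available for this file on this platform')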
def convert_file(file: str, formats: List[str]) -> str:
"""Convert a file from one format to another.
Args:
file (str): The file to convert.
formats (List[str]): A list of formats to convert the file to.
Order of list is preference of format (left to right).
Should be key `conversion.conversion_methods` -> source_format dict.
Returns:
str: The path of the converted file.
"""
conversion_class = find_target_format_file(
file,
formats
)
if conversion_class is not None:
return conversion_class().convert(file)
else:
return file
def __get_format_pref_and_files(
volume_id: int,
issue_id: Union[int, None] = None
) -> List[str]:
"""Get the format preference and load the targeted files into the cursor.
Args:
volume_id (int): The ID of the volume to get the files for.
issue_id (Union[int, None], optional): The ID of the issue to get
the files for.
Defaults to None.
Returns:
List[str]: The format preference in the settings
"""
cursor = get_db()
format_preference = cursor.execute(
"SELECT value FROM config WHERE key = 'format_preference' LIMIT 1;"
).fetchone()[0].split(',')
if format_preference == ['']:
format_preference = []
if not issue_id:
cursor.execute("""
SELECT DISTINCT filepath
FROM files f
INNER JOIN issues_files if
INNER JOIN issues i
ON
f.id = if.file_id
AND if.issue_id = i.id
WHERE volume_id = ?
ORDER BY filepath;
""",
(volume_id,)
)
else:
cursor.execute("""
SELECT DISTINCT filepath
FROM files f
INNER JOIN issues_files if
INNER JOIN issues i
ON
f.id = if.file_id
AND if.issue_id = i.id
WHERE
volume_id = ?
AND i.id = ?
ORDER BY filepath;
""",
(volume_id, issue_id)
)
return format_preference
def preview_mass_convert(
volume_id: int,
issue_id: int = None
) -> List[Dict[str, str]]:
"""Get a list of suggested conversions for a volume or issue
Args:
volume_id (int): The ID of the volume to check for.
issue_id (int, optional): The ID of the issue to check for.
Defaults to None.
Returns:
List[Dict[str, str]]: The list of suggestions.
Dicts have the keys `before` and `after`.
"""
cursor = get_db()
format_preference = __get_format_pref_and_files(
volume_id,
issue_id
)
result = []
for (f,) in cursor:
converter = find_target_format_file(
f,
format_preference
)
if converter is not None:
if converter.target_format == 'folder':
result.append({
'before': f,
'after': dirname(f)
})
else:
result.append({
'before': f,
'after': splitext(f)[0] + '.' + converter.target_format
})
return result
def mass_convert(
volume_id: int,
issue_id: Union[int, None] = None,
files: List[str]= []
) -> None:
"""Convert files for a volume or issue.
Args:
volume_id (int): The ID of the volume to convert for.
issue_id (Union[int, None], optional): The ID of the issue to convert for.
Defaults to None.
files (List[str], optional): Only convert files mentioned in this list.
Defaults to [].
"""
# We're checking a lot if strings are in this list,
# so making it a set will increase performance (due to hashing).
files = set(files)
cursor = get_db()
format_preference = __get_format_pref_and_files(
volume_id,
issue_id
)
for (f,) in cursor.fetchall():
if files and f not in files:
continue
converter = find_target_format_file(
f,
format_preference
)
if converter is not None:
converter().convert(f)
scan_files(Volume(volume_id).get_info())
return
|
Casvt/Kapowarr
|
backend/conversion.py
|
conversion.py
|
py
| 5,191 |
python
|
en
|
code
| 221 |
github-code
|
6
|
27516705586
|
from typing import Literal
ver_num = "3.2.2"
online_message = "Oh no, pas encore..."
mods = {}
def enable_module(mod):
mods[mod] = "✅"
def disable_module(mod):
mods[mod] = "❌"
def get_modules():
return mods
ban_domain = ["twitter", "deezer", "spotify"]
values = Literal[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50]
sites_dict = {
"dailymotion": {
"icon_url": "https://upload.wikimedia.org/wikipedia/commons/2/27/Logo_dailymotion.png",
"color": 0x00bff9,
"message": None
},
"soundcloud": {
"icon_url": "https://play-lh.googleusercontent.com/lvYCdrPNFU0Ar_lXln3JShoE-NaYF_V-DNlp4eLRZhUVkj00wAseSIm-60OoCKznpw=w240-h480",
"color": 0xff6800,
"message": None
},
"tiktok": {
"icon_url": "https://cdn.pixabay.com/photo/2021/06/15/12/28/tiktok-6338432_960_720.png",
"color": 0xee1d52,
"message": None
},
"twitch": {
"icon_url": "https://static-00.iconduck.com/assets.00/twitch-icon-2048x2048-tipdihgh.png",
"color": 0x9146ff,
"message": None
},
"twitter": {
"icon_url": "https://e7.pngegg.com/pngimages/804/985/png-clipart-social-media-logo-computer-icons-information-twitter-logo-media.png",
"color": 0x05acf0,
"message": None
},
"youtube": {
"icon_url": "https://cdn.icon-icons.com/icons2/1099/PNG/512/1485482355-youtube_78661.png",
"color": 0xfe0000,
"message": None
},
"reddit": {
"icon_url": "https://freelogopng.com/images/all_img/1658834272reddit-logo-transparent.png",
"color": 0xff4500,
"message": None
},
"générique": {
"thumbnail": "https://images.frandroid.com/wp-content/uploads/2018/08/guide-apps-video-android.jpg",
"icon_url": "https://cdn0.iconfinder.com/data/icons/basic-uses-symbol-vol-2/100/Help_Need_Suggestion_Question_Unknown-512.png",
"color": 0xffffff,
"message": None
}
}
|
Tintin361/Kiri-Chan
|
tools/variables.py
|
variables.py
|
py
| 2,005 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24746745289
|
# -*- coding: utf-8 -*-
import json
import urllib
from django.contrib import auth
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.conf import settings
from common import utils, page
from www.misc import qiniu_client
from www.misc.decorators import staff_required, common_ajax_response, verify_permission
from www.question.interface import TopicBase
@verify_permission('')
def topic(request, template_name='admin/topic.html'):
from www.question.models import Topic
states = [{'name': x[1], 'value': x[0]} for x in Topic.state_choices]
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
def format_topic(objs, num):
data = []
for x in objs:
num += 1
data.append({
'num': num,
'topic_id': x.id,
'name': x.name,
'domain': x.domain,
'parent_id': x.parent_topic.id if x.parent_topic else '',
'parent_name': x.parent_topic.name if x.parent_topic else '',
'child_count': x.child_count,
'follower_count': x.follower_count,
'question_count': x.question_count,
'level': x.level,
'img': x.get_img(),
'des': x.des,
'sort': x.sort_num,
'is_show': x.is_show,
'state': x.state,
'create_time': str(x.create_time)
})
return data
@verify_permission('query_topic')
def search(request):
topic_name = request.POST.get('topic_name')
page_index = int(request.POST.get('page_index', 1))
data = []
if topic_name:
objs = TopicBase().get_topic_by_name(topic_name)
objs = [objs] if objs else []
else:
objs = TopicBase().get_all_topics()
page_objs = page.Cpt(objs, count=10, page=page_index).info
num = 10 * (page_index - 1)
data = format_topic(page_objs[0], num)
return HttpResponse(
json.dumps({'data': data, 'page_count': page_objs[4], 'total_count': page_objs[5]}),
mimetype='application/json'
)
@verify_permission('query_topic')
def get_topics_by_name(request):
topic_name = request.REQUEST.get('topic_name')
result = []
topics = TopicBase().get_topics_by_name(topic_name)
if topics:
for x in topics:
result.append([x.id, x.name, None, x.name])
return HttpResponse(json.dumps(result), mimetype='application/json')
@verify_permission('query_topic')
def get_topic_by_id(request):
data = ""
topic_id = request.REQUEST.get('topic_id')
obj = TopicBase().get_topic_by_id_or_domain(topic_id, False)
if obj:
data = format_topic([obj], 1)[0]
return HttpResponse(json.dumps(data), mimetype='application/json')
@verify_permission('modify_topic')
def modify_topic(request):
topic_id = request.REQUEST.get('topic_id')
name = request.REQUEST.get('name')
domain = request.REQUEST.get('domain')
des = request.REQUEST.get('des')
state = request.REQUEST.get('state')
sort = request.REQUEST.get('sort')
parent_topic_id = request.REQUEST.get('parent_id')
tb = TopicBase()
obj = tb.get_topic_by_id_or_domain(topic_id, False)
img_name = obj.img
img = request.FILES.get('img')
if img:
flag, img_name = qiniu_client.upload_img(img, img_type='topic')
img_name = '%s/%s' % (settings.IMG0_DOMAIN, img_name)
code, msg = tb.modify_topic(topic_id, name, domain, des, img_name, state, parent_topic_id, sort)
if code == 0:
url = "/admin/topic?#modify/%s" % (topic_id)
else:
url = "/admin/topic?%s#modify/%s" % (msg, topic_id)
return HttpResponseRedirect(url)
@verify_permission('add_topic')
def add_topic(request):
name = request.REQUEST.get('name')
domain = request.REQUEST.get('domain')
des = request.REQUEST.get('des')
state = request.REQUEST.get('state')
sort = request.REQUEST.get('sort')
parent_topic_id = request.REQUEST.get('parent_id')
tb = TopicBase()
img_name = ''
img = request.FILES.get('img')
if img:
flag, img_name = qiniu_client.upload_img(img, img_type='topic')
img_name = '%s/%s' % (settings.IMG0_DOMAIN, img_name)
flag, msg = tb.create_topic(name, domain, parent_topic_id, img_name, des)
if flag == 0:
url = "/admin/topic?#modify/%s" % (msg)
else:
url = "/admin/topic?%s" % (msg)
return HttpResponseRedirect(url)
|
lantianlz/zx
|
www/admin/views_topic.py
|
views_topic.py
|
py
| 4,554 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34858917903
|
# script to find best results of mnist evaluation based on accuracy, same accuracy is not taken care of
import numpy as np
# opening file
with open('log.txt') as file:
# reading all lines from file, skipping first 2 lines
lines = file.readlines()[2:]
# closing file
file.close()
# declaring list scores
scores = []
# appending characters until first space (accuracy) to scores list
[scores.append(line.split(' ')[0]) for line in lines]
# finding index of highest value using numpy
idx = np.argmax(np.array(scores))
# printing entire line of best accuracy
print(lines[idx])
|
maixnor/mnist
|
mnist/findBest.py
|
findBest.py
|
py
| 657 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2001310411
|
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from .base import BaseModel
from .user_group import UserGroup
class Event(BaseModel):
TYPE_1, TYPE_2 = xrange(2)
SOURCE_GROUP, SOURCE_INDIVIDUAL = xrange(2)
EVENT_TYPES = (
(TYPE_1, 'Collecting'),
(TYPE_2, 'Spent')
)
SOURCE_TYPES =(
(SOURCE_GROUP, 'Group'),
(SOURCE_INDIVIDUAL, 'Individual')
)
group = models.ForeignKey(
UserGroup,
on_delete=models.CASCADE,
related_name="events"
)
event_type = models.SmallIntegerField(
choices=EVENT_TYPES)
amount = models.DecimalField(default=0.0, max_digits=10, decimal_places=2)
description = models.TextField(default=None)
source_money = models.SmallIntegerField(choices=SOURCE_TYPES, default=SOURCE_GROUP)
member_join = models.TextField(null=True)
def collecting_money(self):
from .transaction import Transaction
if self.event_type == self.TYPE_1:
for user in self.group.members.filter(id__in=self.member_join_list):
Transaction.create_user_paid_transaction(
user=user,
amount=self.amount,
description=u"<@%s> đóng tiền vào quỹ '%s' cho event '%s'" %(user.userprofile.slack_id, self.group.name, self.description),
group=self.group,
paid_group=True,
event_id=self.id
)
if self.event_type == self.TYPE_2:
if self.source_money == self.SOURCE_GROUP:
Transaction.create_group_paid_transaction(
amount=self.amount,
description=u"<@%s> trả tiền cho event '%s'" %(self.group.name, self.description),
group=self.group,
event=self
)
if self.source_money == self.SOURCE_INDIVIDUAL:
for user in self.group.members.filter(id__in=self.member_join_list):
Transaction.create_user_paid_transaction(
user=user,
amount=self.amount,
description=u"<@%s> trả tiền cho event '%s' của '%s'" %(user.userprofile.slack_id, self.description, self.group.name),
group=self.group,
paid_group=False,
event_id=self.id
)
@property
def event_complete_status(self):
from .transaction import Transaction
if self.transactions.filter(status=Transaction.PENDING).count() > 0:
return False
return True
@property
def member_join_list(self):
return [int(member) for member in self.member_join.split(",")]
|
luhonghai/expense
|
expense/apps/mobile_api/models/event.py
|
event.py
|
py
| 2,818 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34832289590
|
import cv2
import numpy as np
cap = cv2.VideoCapture(0) # grab frames from the webcam
# HSV ranges for the different pen colors
pen_color_HSV = [[86, 121, 205, 111, 245, 255],
                 [46, 78, 172, 71, 255, 255],
                 [22, 70, 214, 31, 255, 255]
                 ]
# BGR colors used to draw each pen's tip
pen_color_BGR = [[255, 0, 0],
                 [0, 255, 0],
                 [0, 255, 255]
                 ]
# record every drawn point and its color: [x, y, color_ID]
draw_points = []
draw_points = []
def find_pen(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # loop over every pen color
    for i in range(len(pen_color_HSV)):
        # lower and upper HSV bounds
        lower = np.array(pen_color_HSV[i][:3])
        upper = np.array(pen_color_HSV[i][3:6])
        mask = cv2.inRange(hsv, lower, upper)
        # keep only the pixels of this color
        result = cv2.bitwise_and(img, img, mask = mask)
        pen_x, pen_y = find_contour(mask)
        cv2.circle(img_contour, (pen_x, pen_y), 10, pen_color_BGR[i], cv2.FILLED)
        # only record the point if a contour was detected
        if(pen_y != -1):
            draw_points.append([pen_x, pen_y, i])
    # cv2.imshow("result", result)
def find_contour(img):
    # detect contours
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    x, y, w, h = -1, -1, -1, -1
    for cnt in contours:
        cv2.drawContours(img_contour, cnt, -1, (0,0,0), 4) # outline the contour
        # fit a bounding rectangle around the shape
        area = cv2.contourArea(cnt)
        if(area > 500):
            peri = cv2.arcLength(cnt, True)
            vertices = cv2.approxPolyDP(cnt, peri * 0.02, True) # polygon vertices
            x, y, w, h = cv2.boundingRect(vertices) # bounding box: top-left x, top-left y, width, height
    return x+w//2, y
# draw every recorded point with its pen color
def draw(draw_points):
    for point in draw_points:
        cv2.circle(img_contour, (point[0], point[1]), 10, pen_color_BGR[point[2]], cv2.FILLED)
# show the video stream
while(True):
    ret, frame = cap.read() # returns (grabbed the next frame successfully -> bool, the frame itself)
    if ret:
        img_contour = frame.copy()
        find_pen(frame)
        draw(draw_points)
        cv2.imshow("contour", img_contour)
    else:
        break
    if cv2.waitKey(1) == ord("q"): # press q to stop the video
        break
|
jim2832/Image-Recognition
|
virtual_pen.py
|
virtual_pen.py
|
py
| 2,304 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30297740447
|
# Seq2Seq model with attention
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from copy import copy
def init_weights(m):
for name, param in m.named_parameters():
if 'weight' in name:
nn.init.normal_(param.data, mean=0, std=0.01)
else:
nn.init.constant_(param.data, 0)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def train_an_epoch(model, iterator, optimizer, criterion, clip, device, scheduler, tf_ratio=1.):
model.train()
epoch_loss = 0
for i, batch in enumerate(iterator):
src = batch[0].float().to(device)
trg = copy(batch[0]).float().to(device)
optimizer.zero_grad()
output = model(src, trg, tf_ratio).permute(1, 0, 2)
loss = criterion(output, trg)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
scheduler.step()
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def evaluate(model, iterator, criterion, device):
model.eval()
epoch_loss = 0
with torch.no_grad():
for i, batch in enumerate(iterator):
src = batch[0].float().to(device)
trg = copy(batch[0]).float().to(device)
output = model(src, trg, 0).permute(1, 0, 2) #turn off teacher forcing
loss = criterion(output, trg)
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
class Encoder(nn.Module):
def __init__(self, num_layers=2, input_dim=10, emb_dim=64, enc_hid_dim=128, dec_hid_dim=128, dropout=0.5):
super().__init__()
self.embedding = nn.Linear(input_dim, emb_dim)
self.norm = nn.LayerNorm(emb_dim)
self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = True, batch_first = True, num_layers=num_layers)
self.dropout = nn.Dropout(dropout)
self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
def forward(self, src):
embedded = self.dropout(self.embedding(src))
embedded = self.norm(embedded)
#embedded = [batch size, n_time_steps, emb dim]
outputs, hidden = self.rnn(embedded)
#outputs = [batch size, n_time_steps, hid dim * num directions]
#hidden = [n layers * num directions, batch size, hid dim]
hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)))
#hidden is stacked [forward_1, backward_1, forward_2, backward_2, ...]
#outputs are always from the last layer
#hidden = [batch size, dec hid dim]
return outputs, hidden
class Attention(nn.Module):
def __init__(self, enc_hid_dim, dec_hid_dim):
super().__init__()
self.norm = nn.LayerNorm((enc_hid_dim * 2) + dec_hid_dim)
self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim)
self.v = nn.Linear(dec_hid_dim, 1, bias = False)
def forward(self, hidden, encoder_outputs):
#hidden = [batch size, dec hid dim]
#encoder_outputs = [batch size, src len, enc hid dim * 2]
src_len = encoder_outputs.shape[1]
#repeat decoder hidden state src_len times
hidden = hidden.unsqueeze(1).repeat(1, src_len, 1)
#hidden = [batch size, src len, dec hid dim]
#encoder_outputs = [batch size, src len, enc hid dim * 2]
energy = torch.tanh(self.attn(self.norm(torch.cat((hidden, encoder_outputs), dim = 2))))
#energy = [batch size, src len, dec hid dim]
attention = self.v(energy).squeeze(2)
#attention= [batch size, src len]
return F.softmax(attention, dim=1)
class Decoder(nn.Module):
def __init__(self, attention, num_layer=1, output_dim=10, emb_dim=64, enc_hid_dim=128, dec_hid_dim=128, dropout=0.5):
super().__init__()
self.output_dim = output_dim
self.attention = attention
self.embedding = nn.Linear(output_dim, emb_dim)
self.norm = nn.LayerNorm(emb_dim)
self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim, num_layers=num_layer)
self.fc_out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, input, hidden, encoder_outputs, dec_hid=None):
#input = [batch size, n_features]
#hidden = [batch size, dec hid dim]
#encoder_outputs = [batch size, n_time_steps, enc hid dim * 2]
input = input.unsqueeze(1)
#input = [batch size, 1, n_features]
embedded = self.dropout(self.embedding(input)).permute(1, 0, 2)
embedded = self.norm(embedded)
#embedded = [1, batch_size, emb dim]
a = self.attention(hidden, encoder_outputs)
#a = [batch size, src len]
a = a.unsqueeze(1)
#a = [batch size, 1, src len]
weighted = torch.bmm(a, encoder_outputs)
#weighted = [batch size, 1, enc hid dim * 2]
weighted = weighted.permute(1, 0, 2)
#weighted = [1, batch size, enc hid dim * 2]
rnn_input = torch.cat((embedded, weighted), dim = 2)
#rnn_input = [1, batch size, (enc hid dim * 2) + emb dim]
        if dec_hid is None:
output, dec_hid = self.rnn(rnn_input)
else:
output, dec_hid = self.rnn(rnn_input, dec_hid)
embedded = embedded.squeeze(0)
output = output.squeeze(0)
weighted = weighted.squeeze(0)
prediction = self.fc_out(torch.cat((output, weighted, embedded), dim = 1))
#prediction = [batch size, output dim]
return prediction, dec_hid
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
def forward(self, src, trg, teacher_forcing_ratio = 0.5):
#teacher_forcing_ratio is probability to use teacher forcing
#e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
batch_size = src.shape[0]
trg_len = trg.shape[1]
trg_vocab_size = self.decoder.output_dim
#tensor to store decoder outputs
outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
#encoder_outputs is all hidden states of the input sequence, back and forwards
#hidden is the final forward and backward hidden states, passed through a linear layer
encoder_outputs, hidden = self.encoder(src)
#first input to the decoder (start token = [0, ..., 0])
input = torch.zeros_like(trg[:, 0, :]).to(self.device)
dec_hid = None
for t in range(0, trg_len-1):
#insert input token embedding, previous hidden state and all encoder hidden states
#receive output tensor (predictions) and new hidden state
output, dec_hid = self.decoder(input, hidden, encoder_outputs, dec_hid)
#place predictions in a tensor holding predictions for each token
outputs[t] = output
hidden = dec_hid[-1, ...]
#decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
#if teacher forcing, use actual next token as next input
#if not, use predicted token
input = trg[:, t, :] if teacher_force else output
return outputs
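# Minimal usage sketch (not part of the original file; all dimensions are assumptions that
# match the defaults above): wire Encoder, Attention, Decoder and Seq2Seq together and run
# one forward pass on random data, in the same autoencoding setup used by train_an_epoch.
if __name__ == "__main__":
    device = torch.device("cpu")
    attn = Attention(enc_hid_dim=128, dec_hid_dim=128)
    enc = Encoder(num_layers=2, input_dim=10, emb_dim=64, enc_hid_dim=128, dec_hid_dim=128)
    dec = Decoder(attn, num_layer=1, output_dim=10, emb_dim=64, enc_hid_dim=128, dec_hid_dim=128)
    model = Seq2Seq(enc, dec, device).to(device)
    x = torch.randn(4, 20, 10)                    # [batch size, n_time_steps, n_features]
    out = model(x, x, teacher_forcing_ratio=0.5)  # src and trg are the same sequence here
    print(out.shape)                              # torch.Size([20, 4, 10]) = [trg len, batch, features]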
| three0-s/KT-ETRI | model.py | model.py | py | 7,755 | python | en | code | 0 | github-code | 6 | 15038954927 |
import tkinter
from PIL import Image
import time
import pygame
# Link to the Google Drive folder that holds the music file (it exceeded GitHub's size limit): https://drive.google.com/drive/folders/1RzTOtOH4LLt6UE6C6TCYG-0Quf38lkTE
pygame.init()
pygame.mixer.music.load("music.wav")
pygame.mixer.music.play(-1)
def game():
code_symbols = []
for i in range(10):
code_symbols.append(i)
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for i in alphabet:
code_symbols.append(i)
middle = 9
def block(k):
from random import randint
interval = k * middle
summa = 0
result = ''
while summa <= interval:
summa = 0
result = ''
for i in range(k):
a = randint(0, 35)
summa += a
result += str(code_symbols[a])
return result
def clicked():
lbl = tkinter.Label(window, text="Успешно! Ваш код: " + block(5) + '-' + block(4) + '-' + block(4),
font=("Arial Bold", 25),
bg='Gray')
lbl.grid(column=0, row=0)
window = tkinter.Tk()
window.title("Добро пожаловать в генерацию кода")
window.geometry('1313x833')
window.image = tkinter.PhotoImage(file='gta.png')
bg_gta = tkinter.Label(window, image=window.image)
bg_gta.grid(column=0, row=0)
btn_1 = tkinter.Button(window, text="Сгенерировать код", font=("Arial Bold", 15), bg='Gray', command=clicked)
btn_1.grid(column=0, row=0)
window.mainloop()
def animation(count, k):
global anim
global frames
im2 = im[count]
gif_label.configure(image=im2)
count += 1
k += 1
time.sleep(0.5)
if count == frames:
count = 0
if k == frames + 1:
root.destroy()
game()
anim = root.after(50, lambda: animation(count, k))
root = tkinter.Tk()
root.title('Мы начинаем!')
file = "10.gif"
root.geometry('627x627')
info = Image.open(file)
frames = info.n_frames # gives total number of frames that gif contains
# creating list of PhotoImage objects for each frames
im = [tkinter.PhotoImage(file=file, format=f"gif -index {i}") for i in range(frames)]
count = 0
k = 0
anim = None
gif_label = tkinter.Label(root, image="")
gif_label.pack()
btn_2 = tkinter.Button(root, text="СТАРТ", font=("Arial Bold", 15), command=lambda: animation(count, k))
btn_2.pack()
root.mainloop()
| PashaSeleznev/Lab_4 | main.py | main.py | py | 2,652 | python | en | code | 0 | github-code | 6 | 33272740574 |
from random import random
import numpy
from copy import deepcopy
from texttable import Texttable
class Ant:
def __init__(self, size):
self._size = size
self._representation = [[] for i in range(size * 2)]
self._graph = [self._representation]
self._freeSpots = size - 1
def getFreeSpotsCount(self):
return self._freeSpots
def getRepresentation(self):
return self._representation
def getGraph(self):
return self._graph[:]
def setRepresentation(self, newRepresentation):
if len(self._representation[-1]) > len(newRepresentation[-1]):
self.decreaseFreeSpots()
self._representation = deepcopy(newRepresentation)
def decreaseFreeSpots(self):
self._freeSpots -= 1
def nextPossibilities(self):
possibilities = []
for i in range(self._size * 2 * (self._freeSpots)):
newPossibility = deepcopy(self._representation)
for row in range(self._size * 2):
possibleNumbers = [i for i in range(1, self._size + 1)]
for elem in self._representation[row]:
possibleNumbers.remove(elem)
# if row >= self._size and newPossibility[row - self._size][-1] in possibleNumbers:
# possibleNumbers.remove(newPossibility[row - self._size][-1])
choice = numpy.random.choice(possibleNumbers)
newPossibility[row].append(choice)
possibleNumbers.remove(choice)
possibilities.append(newPossibility)
return possibilities
def move(self, q0, trace, alpha, beta):
nextPossibilities = self.nextPossibilities()
distances = []
if len(nextPossibilities) == 0:
return False
auxAnt = Ant(self._size)
for position in nextPossibilities:
auxAnt.setRepresentation(position)
distances.append([position, auxAnt.fitness() - self.fitness()])
for i in range(len(distances)):
index = [0, False]
            while index[0] < len(trace) and not index[1]:
                if trace[index[0]] == distances[i][0]:
                    index[1] = True
                else:
                    index[0] += 1
            if index[1]:
                distances[i][1] = (distances[i][1] ** beta) * (trace[index[0]] ** alpha)
if numpy.random.random() < q0:
distances = min(distances, key=lambda elem:elem[1])
self.setRepresentation(distances[0])
self._graph.append(self._representation)
else:
suma = 0
for elem in distances:
suma += elem[1]
if suma == 0:
choice = numpy.random.randint(0, len(distances))
self.setRepresentation(distances[choice][0])
self._graph.append(self._representation)
return
distances = [[distances[i][0], distances[i][1] / suma] for i in range(len(distances))]
for i in range(len(distances)):
sum = 0
for j in range(i+1):
sum += distances[j][1]
distances[i][1] = sum
choice = numpy.random.random()
i = 0
while choice > distances[i][1]:
i += 1
self.setRepresentation(distances[i][0])
self._graph.append(self._representation)
return True
def __str__(self):
table = Texttable()
for i in range(self._size):
row = []
for j in range(len(self._representation[i])):
row.append((self._representation[i][j], self._representation[i + self._size][j]))
table.add_row(row)
return table.draw()
def fitness(self):
fitness = 0
for i in range(self._size):
for j in range(len(self._representation[i])):
if self._representation[i][j] == self._representation[i + self._size][j]:
fitness += 1
if i < len(self._representation[i]) and self._representation[j][i] == self._representation[j + self._size][i]:
fitness += 1
for i in range(self._size - 1):
for j in range(i + 1, self._size):
fitness += numpy.count_nonzero(
numpy.equal(self._representation[i + self._size], self._representation[j + self._size]))
fitness += numpy.count_nonzero(numpy.equal(self._representation[i], self._representation[j]))
for i in range(len(self._representation[-1]) - 1):
column11 = [self._representation[j][i] for j in range(self._size)]
column12 = [self._representation[j + self._size][i] for j in range(self._size)]
for j in range(i + 1, len(self._representation[i])):
column21 = [self._representation[k][j] for k in range(self._size)]
column22 = [self._representation[k + self._size][j] for k in range(self._size)]
fitness += numpy.count_nonzero(numpy.equal(column11, column21))
fitness += numpy.count_nonzero(numpy.equal(column12, column22))
return fitness
| CMihai998/Artificial-Intelligence | Lab4 - ACO/models/ant.py | ant.py | py | 5,321 | python | en | code | 3 | github-code | 6 | 3596407711 |
from tkinter import *
#tkinter module GUI application
fruits=["dragon fruits","Apple","banana","grapes","pine apple","papaya"]
win=Tk()
win.title("Demo GUI 1")
win.geometry("400x400")
li=Listbox(win,foreground="blue",bg="tomato")
index=1
for fruit in fruits:
li.insert(index,fruit+str(index))
index=index+1
li.pack(side=RIGHT)
win.mainloop()
| AshishSirVision/Mysql-python-code | p6.py | p6.py | py | 373 | python | en | code | 1 | github-code | 6 | 44313582014 |
from tkinter import *
import sqlite3
from PIL import ImageTk, Image
from backend import Database
import requests
database=Database("books.db")
class Window(object):
def __init__(self,window):
self.window=window
self.window.title("Bookstore")
self.window.configure(bg='#856ff8')
#Title input
self.title=StringVar()
self.l1=Label(text="Title")
self.l1.grid(row=0,column=0)
self.e1=Entry(window,textvariable=self.title)
self.e1.grid(row=0,column=1)
#Author input
self.author=StringVar()
self.l2=Label(text="Author")
self.l2.grid(row=0,column=2)
self.e2=Entry(window,textvariable=self.author)
self.e2.grid(row=0,column=3)
#Year input
self.year=StringVar()
self.l3=Label(text="Year")
self.l3.grid(row=1,column=0)
self.e3=Entry(window,textvariable=self.year)
self.e3.grid(row=1,column=1)
#Genre input
self.genre=StringVar()
self.l4=Label(text="Genre")
self.l4.grid(row=1,column=2)
self.e4=Entry(window,textvariable=self.genre)
self.e4.grid(row=1,column=3)
#Rate input
self.rate=StringVar()
self.l5=Label(text="Rating")
self.l5.grid(row=2,column=0)
self.e5=Entry(window,textvariable=self.rate)
self.e5.grid(row=2,column=1)
#ISBN input
self.isbn=StringVar()
self.l6=Label(text="ISBN")
self.l6.grid(row=2,column=2)
self.e6=Entry(window,textvariable=self.isbn)
self.e6.grid(row=2,column=3)
#URL input
self.img_url=StringVar()
self.l7=Label(text="Image Url")
self.l7.grid(row=3,column=1)
self.e7=Entry(window,textvariable=self.img_url)
self.e7.grid(row=3,column=2)
self.list1=Listbox(window,width=75)
self.list1.grid(row=0,column=5,rowspan=6)
sb1=Scrollbar(window)
sb1.grid(row=0,column=6,rowspan=6)
self.list1.configure(yscrollcommand=sb1.set)
sb1.configure(command=self.list1.yview)
self.list1.bind('<<ListboxSelect>>',self.CurSelect)
#Search Button
b1=Button(text ="Search Entry", width=15,foreground="red",command=self.search)
b1.grid(row=4,column=1)
#Add Button
b2=Button(text ="Add Entry", width=15,command=self.add)
b2.grid(row=4,column=3)
#Update Button
b3=Button(text ="Update Entry", width=15,command=self.update)
b3.grid(row=5,column=1)
#Delete Button
b4=Button(text ="Delete Entry", width=15,command=self.delete)
b4.grid(row=5,column=3)
b5=Button(text ="Close", width=15,command=window.destroy)
b5.grid(row=5,column=2)
def view(self):
for row in database.view():
self.list1.insert(END,row)
def search(self):
self.list1.delete(0,END)
        for row in database.search(self.title.get(),self.author.get(),self.year.get(),self.genre.get(),self.rate.get(),self.isbn.get()):
self.list1.insert(END,row)
def CurSelect(self,e):
if self.list1.size()!=0:
global selected_tuple
global img
#this one will get the index and to be used for deleting an item
            index=self.list1.curselection()[0]
self.selected_tuple=self.list1.get(index)
value=self.list1.get(self.list1.curselection())
#This part will create the image box on the side and it will display the image that belongs to the book
self.img_url1=value[len(value)-1]
            self.img = Image.open(requests.get(self.img_url1, stream=True).raw)
            self.img = self.img.resize((170,170), Image.ANTIALIAS)
            self.img = ImageTk.PhotoImage(self.img)
            self.l8=Label(image=self.img)
self.l8.grid(row=0,column=7,rowspan=5)
#this part will bring selected item to the entrees
self.e1.delete(0,END)
self.e1.insert(END,self.selected_tuple[1])
self.e2.delete(0,END)
self.e2.insert(END,self.selected_tuple[2])
self.e3.delete(0,END)
self.e3.insert(END,self.selected_tuple[3])
self.e4.delete(0,END)
self.e4.insert(END,self.selected_tuple[4])
self.e5.delete(0,END)
self.e5.insert(END,self.selected_tuple[5])
self.e6.delete(0,END)
self.e6.insert(END,self.selected_tuple[6])
self.e7.delete(0,END)
self.e7.insert(END, self.selected_tuple[7])
def delete(self):
database.delete(self.selected_tuple[0])
self.list1.delete(0,END)
        self.view()
def add(self):
database.insert(self.title.get(),self.author.get(),self.year.get(),self.genre.get(),self.rate.get(),self.isbn.get(),self.img_url.get())
self.e1.delete(0,END)
self.e2.delete(0,END)
self.e3.delete(0,END)
self.e4.delete(0,END)
self.e5.delete(0,END)
self.e6.delete(0,END)
self.e7.delete(0,END)
self.list1.delete(0,END)
self.view()
def update(self):
database.update(self.selected_tuple[0],self.title.get(),self.author.get(),self.year.get(),self.genre.get(),self.rate.get(),self.isbn.get(),self.img_url.get())
self.list1.delete(0,END)
        self.view()
window= Tk()
win=Window(window)
win.view()
window.mainloop()
| mertwithamouth/Py_Projects | Book Store/bookstore.py | bookstore.py | py | 5,422 | python | en | code | 0 | github-code | 6 | 24168954856 |
import asyncio
import importlib
import re
from contextlib import closing, suppress
from uvloop import install
from pyrogram import filters, idle
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message
from Yukinon.menu import *
from Yukinon import *
from Yukinon.plugins import ALL_MODULES
from Yukinon.utils import paginate_modules
from lang import get_command
from Yukinon.utils.lang import *
from Yukinon.utils.commands import *
from Yukinon.mongo.rulesdb import *
from Yukinon.utils.start import *
from Yukinon.mongo.usersdb import *
from Yukinon.mongo.restart import *
from Yukinon.mongo.chatsdb import *
from Yukinon.plugins.fsub import ForceSub
import random
loop = asyncio.get_event_loop()
flood = {}
START_COMMAND = get_command("START_COMMAND")
HELP_COMMAND = get_command("HELP_COMMAND")
HELPABLE = {}
async def start_bot():
global HELPABLE
for module in ALL_MODULES:
imported_module = importlib.import_module("Yukinon.plugins." + module)
if (
hasattr(imported_module, "__MODULE__")
and imported_module.__MODULE__
):
imported_module.__MODULE__ = imported_module.__MODULE__
if (
hasattr(imported_module, "__HELP__")
and imported_module.__HELP__
):
HELPABLE[
imported_module.__MODULE__.replace(" ", "_").lower()
] = imported_module
all_module = ""
j = 1
for i in ALL_MODULES:
all_module = "•≫ Successfully imported:{:<15}.py".format(i)
print(all_module)
restart_data = await clean_restart_stage()
try:
if restart_data:
await app.edit_message_text(
restart_data["chat_id"],
restart_data["message_id"],
"**Restarted Successfully**",
)
else:
await app.send_message(LOG_GROUP_ID, "Yukinon Robot started!")
except Exception as e:
print(e)
#print(f"{all_module}")
print("""
_____________________________________________
| |
| Deployed Successfully |
| (C) 2021-2022 by @TechZBots |
|_____________________________________________|
""")
await idle()
await aiohttpsession.close()
await app.stop()
for task in asyncio.all_tasks():
task.cancel()
home_keyboard_pm = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text=" ➕ Add Me To Your Group ➕ ",
url=f"http://t.me/{BOT_USERNAME}?startgroup=new",
)
],
[
InlineKeyboardButton(
text=" ℹ️ About", callback_data="_about"
),
InlineKeyboardButton(
text="🌍 languages ", callback_data="_langs"
),
],
[
InlineKeyboardButton(
text="📮 How To Use Me", callback_data="bot_commands"
),
],
[
InlineKeyboardButton(
text="🌐 My Website",
url=f"https://szrosebot.ml",
),
InlineKeyboardButton(
text="🔰News Channel",
url=f"https://t.me/szroseupdates",
)
],
]
)
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text="📚 Commands & help",
url=f"t.me/{BOT_USERNAME}?start=help",
)
]
]
)
IMG = ["https://telegra.ph/file/c8f5c1dd990ca9a3d8516.jpg",
"https://telegra.ph/file/77cc3154b752ce822fd52.jpg",
"https://telegra.ph/file/e72fb0b6a7fba177cf4c7.jpg",
"https://telegra.ph/file/8738a478904238e367939.jpg",
"https://telegra.ph/file/68d7830ba72820f44bda0.jpg"
]
@app.on_message(filters.command(START_COMMAND))
@language
async def start(client, message: Message, _):
FSub = await ForceSub(bot, message)
if FSub == 400:
return
chat_id = message.chat.id
if message.sender_chat:
return
if message.chat.type != "private":
await message.reply(
_["main2"], reply_markup=keyboard)
return await add_served_chat(message.chat.id)
if len(message.text.split()) > 1:
name = (message.text.split(None, 1)[1]).lower()
if name.startswith("rules"):
await get_private_rules(app, message, name)
return
elif "_" in name:
module = name.split("_", 1)[1]
text = (_["main6"].format({HELPABLE[module].__MODULE__}
+ HELPABLE[module].__HELP__)
)
await message.reply(text, disable_web_page_preview=True)
elif name == "help":
text, keyb = await help_parser(message.from_user.first_name)
await message.reply(
_["main5"],
reply_markup=keyb,
disable_web_page_preview=True,
)
elif name == "connections":
await message.reply("Run /connections to view or disconnect from groups!")
else:
served_chats = len(await get_served_chats())
served_chats = []
chats = await get_served_chats()
for chat in chats:
served_chats.append(int(chat["chat_id"]))
served_users = len(await get_served_users())
served_users = []
users = await get_served_users()
for user in users:
served_users.append(int(user["bot_users"]))
await message.reply(f"""
[👋]({random.choice(IMG)}) Hey there {message.from_user.mention},
My name is Yukinon, an advanced Telegram group management bot that helps you protect your groups and suits all your needs.
I currently manage about `{len(served_chats)}` groups and have over `{len(served_users)}` users.
⚒ Send me /help to get the list of commands.
👨💻Dᴇᴠᴇʟᴏᴘᴇʀ : @supunma
""",
reply_markup=home_keyboard_pm,
)
return await add_served_user(message.from_user.id)
@app.on_message(filters.command(HELP_COMMAND))
@language
async def help_command(client, message: Message, _):
FSub = await ForceSub(bot, message)
if FSub == 400:
return
if message.chat.type != "private":
if len(message.command) >= 2:
name = (message.text.split(None, 1)[1]).replace(" ", "_").lower()
if str(name) in HELPABLE:
key = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text=_["main3"],
url=f"t.me/{BOT_USERNAME}?start=help_{name}",
)
],
]
)
await message.reply(
_["main4"],
reply_markup=key,
)
else:
await message.reply(
_["main2"], reply_markup=keyboard
)
else:
await message.reply(
_["main2"], reply_markup=keyboard
)
else:
if len(message.command) >= 2:
name = (message.text.split(None, 1)[1]).replace(" ", "_").lower()
if str(name) in HELPABLE:
text = (_["main6"].format({HELPABLE[name].__MODULE__}
+ HELPABLE[name].__HELP__)
)
if hasattr(HELPABLE[name], "__helpbtns__"):
button = (HELPABLE[name].__helpbtns__) + [[InlineKeyboardButton("« Back", callback_data="bot_commands")]]
if not hasattr(HELPABLE[name], "__helpbtns__"): button = [[InlineKeyboardButton("« Back", callback_data="bot_commands")]]
await message.reply(text,
reply_markup=InlineKeyboardMarkup(button),
disable_web_page_preview=True)
else:
text, help_keyboard = await help_parser(
message.from_user.first_name
)
await message.reply(
_["main5"],
reply_markup=help_keyboard,
disable_web_page_preview=True,
)
else:
text, help_keyboard = await help_parser(
message.from_user.first_name
)
await message.reply(
text, reply_markup=help_keyboard, disable_web_page_preview=True
)
return
@app.on_callback_query(filters.regex("startcq"))
@languageCB
async def startcq(client,CallbackQuery, _):
served_chats = len(await get_served_chats())
served_chats = []
chats = await get_served_chats()
for chat in chats:
served_chats.append(int(chat["chat_id"]))
served_users = len(await get_served_users())
served_users = []
users = await get_served_users()
for user in users:
served_users.append(int(user["bot_users"]))
await CallbackQuery.message.edit(
text=f"""
👋 Hey there {CallbackQuery.from_user.mention},
My name is Yukinon, an advanced Telegram group management bot that helps you protect your groups and suits all your needs.
I currently manage about `{len(served_chats)}` groups and have over `{len(served_users)}` users.
⚒ Send me /help to get the list of commands.
👨💻Dᴇᴠᴇʟᴏᴘᴇʀ : @supunma
""",
disable_web_page_preview=True,
reply_markup=home_keyboard_pm)
async def help_parser(name, keyboard=None):
if not keyboard:
keyboard = InlineKeyboardMarkup(paginate_modules(0, HELPABLE, "help"))
return (
"""
**Welcome to help menu**
I'm a group management bot with some useful features.
You can choose an option below, by clicking a button.
If you have any bugs or questions on how to use me,
have a look at my [Docs](https://szsupunma.gitbook.io/rose-bot/), or head to @szteambots.
**All commands can be used with the following: / **""",
keyboard,
)
@app.on_callback_query(filters.regex("bot_commands"))
@languageCB
async def commands_callbacc(client,CallbackQuery, _):
text ,keyboard = await help_parser(CallbackQuery.from_user.mention)
await app.send_message(
CallbackQuery.message.chat.id,
text=_["main5"],
reply_markup=keyboard,
disable_web_page_preview=True,
)
await CallbackQuery.message.delete()
@app.on_callback_query(filters.regex(r"help_(.*?)"))
@languageCB
async def help_button(client, query, _):
home_match = re.match(r"help_home\((.+?)\)", query.data)
mod_match = re.match(r"help_module\((.+?)\)", query.data)
prev_match = re.match(r"help_prev\((.+?)\)", query.data)
next_match = re.match(r"help_next\((.+?)\)", query.data)
back_match = re.match(r"help_back", query.data)
create_match = re.match(r"help_create", query.data)
top_text = _["main5"]
if mod_match:
module = (mod_match.group(1)).replace(" ", "_")
text = (
"{} **{}**:\n".format(
"Here is the help for", HELPABLE[module].__MODULE__
)
+ HELPABLE[module].__HELP__
+ "\n👨💻Dᴇᴠᴇʟᴏᴘᴇʀ : @supunma"
)
if hasattr(HELPABLE[module], "__helpbtns__"):
button = (HELPABLE[module].__helpbtns__) + [[InlineKeyboardButton("« Back", callback_data="bot_commands")]]
if not hasattr(HELPABLE[module], "__helpbtns__"): button = [[InlineKeyboardButton("« Back", callback_data="bot_commands")]]
await query.message.edit(
text=text,
reply_markup=InlineKeyboardMarkup(button),
disable_web_page_preview=True,
)
await query.answer(f"Here is the help for {module}",show_alert=True)
elif home_match:
await app.send_message(
query.from_user.id,
text= _["main2"],
reply_markup=home_keyboard_pm,
)
await query.message.delete()
elif prev_match:
curr_page = int(prev_match.group(1))
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(curr_page - 1, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif next_match:
next_page = int(next_match.group(1))
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(next_page + 1, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif back_match:
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(0, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif create_match:
text, keyboard = await help_parser(query)
await query.message.edit(
text=text,
reply_markup=keyboard,
disable_web_page_preview=True,
)
return await client.answer_callback_query(query.id)
if __name__ == "__main__":
install()
with closing(loop):
with suppress(asyncio.exceptions.CancelledError):
loop.run_until_complete(start_bot())
loop.run_until_complete(asyncio.sleep(3.0))
| TechShreyash/Yukinon_Robot | Yukinon/__main__.py | __main__.py | py | 13,537 | python | en | code | 3 | github-code | 6 | 35253557105 |
"""
Classes containing different occlusion models (e.g. estimators for shadows, sky view factor etc.)
"""
import hylite
import matplotlib.pyplot as plt
import numpy as np
from hylite.correct import get_hull_corrected
from hylite.multiprocessing import parallel_chunks
def estimate_path_radiance(image, depth, thresh=1):
"""
Apply the dark object subtraction (DOS) method to estimate path radiance in the provided image.
Args:
image: the hyperspectral image (HyImage instance) to estimate path radiance for.
depth: A 2-D (width,height) numpy array of pixel depths in meters. This can be easily computed using
a HyScene instance.
thresh: the percentile threshold to use when selecting dark pixels. Default is 1%.
Returns:
A tuple containing:
- spectra = a numpy array containing the estimated path radiance spectra (in radiance per meter of depth).
- path = a HyImage instance containing the estimated path radiance per pixel (computed by multiplying
the spectra by the depth).
"""
# identify dark pixels
r = np.nanmean(image.data, axis=-1) # calculate mean brightness
assert r.shape == depth.shape, "Error: depth array and HSI have different shapes: %s != %s" % (
str(r.shape), str(depth.shape))
r[r == 0] = np.nan # remove background / true zeros as this can bias the percentile clip
r[np.logical_not(np.isfinite(depth))] = np.nan # remove areas without depth info
r[depth == 0] = np.nan # remove areas without depth info
# extract dark pixels and get median
thresh = np.nanpercentile(r, thresh) # threshold for darkest pixels
darkref = image.data[r <= thresh, :]
darkdepth = depth[r <= thresh] # corresponding depths
# compute path radiance estimate
S = darkref / darkdepth[..., None] # compute estimated path-radiance per meter
S = np.nanpercentile(S, 50, axis=0) # take median of all estimates
# compute per pixel path radiance
P = image.copy(data=False)
P.data = depth[..., None] * S[None, None, :]
return S, P
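# Toy numpy sketch of the dark-object-subtraction idea used above (synthetic data,
# independent of hylite; the helper name is ours, not part of the library): take the
# darkest pixels, normalise their spectra by depth, and use the median as the
# per-metre path radiance estimate.
def _dos_sketch(spectra, depth, thresh=1):
    brightness = np.nanmean(spectra, axis=-1)        # mean brightness per pixel
    cut = np.nanpercentile(brightness, thresh)       # darkest `thresh` percent of pixels
    dark = spectra[brightness <= cut] / depth[brightness <= cut, None]
    return np.nanpercentile(dark, 50, axis=0)        # median per-band estimate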
def correct_path_absorption(data, band_range=(0, -1), thresh=99, atabs = 1126., vb=True):
"""
    Fit a known atmospheric water feature and remove the associated atmospheric path absorptions from reflectance spectra.
See Lorenz et al., 2018 for more details.
Reference:
https://doi.org/10.3390/rs10020176
Args:
        data: a hyperspectral dataset (HyData instance) to correct
band_range: a range of bands to do this over. Default is (0,-1), which applies the correction to all bands.
        thresh: the percentile to apply when identifying the smallest absorption in any range based on hull corrected
spectra. Lower values will remove more absorption (potentially including features of interest).
atabs: wavelength position at which a known control feature is situated that defines the intensity of correction
                - for atmospheric effects, this defaults to 1126 nm
vb: True if a progress bar should be created during hull correction steps.
Returns:
a HyData instance containing the corrected spectra.
"""
assert isinstance(atabs, float), "Absorption wavelength must be float"
# subset dataset
out = data.export_bands(band_range)
nanmask = np.logical_not(np.isfinite(out.data))
out.data[nanmask] = 0 # replace nans with 0
# get depth of water feature at 1126 nm for all pixels
atm_depth = (out.get_band(atabs - 50.) + out.get_band(atabs + 50.)) / 2 - out.get_band(atabs)
# kick out data points with over-/undersaturated spectra
atm_temp = atm_depth.copy()
atm_temp[np.logical_or(np.nanmax(out.data, axis=-1) >= 1, np.nanmax(out.data, axis=-1) <= 0)] = 0
# extract pixels that are affected most by the features
highratio = out.data[atm_temp > np.percentile(atm_temp, 90)]
# hull correct those
hull = get_hull_corrected(hylite.HyData(highratio), vb=vb)
# extract the always consistent absorptions
hull_max = np.nanpercentile(hull.data, thresh, axis=0)
vmin = hull_max[out.get_band_index(atabs)]
# apply adjustment and return
if out.is_image():
nmin = -atm_depth[..., None]
out.data -= ((hull_max[None, None, :] - vmin) * (-nmin) / (1 - vmin) + nmin)
else:
nmin = -atm_depth[..., None]
out.data -= ((hull_max[None, :] - vmin) * (-nmin) / (1 - vmin) + nmin)
out.data[nanmask] = np.nan # add nans back in
out.data = np.clip(out.data, 0, 1)
return out
| hifexplo/hylite | hylite/correct/illumination/path.py | path.py | py | 4,573 | python | en | code | 24 | github-code | 6 | 20674612571 |
import logging
import sys
from tracker import LogawareMixin, getenv_or_fail
from tracker.fetch.online import JsonEndpointFetcher
from tracker.metadata.retriever import LiftMetadataRetriever
from tracker.metadata.store import LiftMetadataDatabaseRecorder
class LiftMetadataInserter(LogawareMixin):
def __init__(self, lift_metadata_retriever: LiftMetadataRetriever, database_client: LiftMetadataDatabaseRecorder):
super().__init__()
self.lift_metadata_retriever = lift_metadata_retriever
self.database_client = database_client
def insert(self):
lift_metadata = []
for page in range(1, 13):
lift_metadata.extend(self.lift_metadata_retriever.lift_metadata(page))
self._log.debug(f'recording lift state snapshot {lift_metadata}')
self.database_client.record_all(lift_metadata)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s", stream=sys.stdout)
recorder = LiftMetadataDatabaseRecorder(getenv_or_fail('MONGODB_PASS'))
recorder.purge_data()
snapshot_taker = LiftMetadataInserter(
LiftMetadataRetriever(JsonEndpointFetcher.lift_metadata_fetcher(getenv_or_fail('DOLOMITI_BEARER'))),
recorder
)
snapshot_taker.insert()
| dachrisch/dolomiti-lift-queue | tracker/metadata/insert.py | insert.py | py | 1,298 | python | en | code | 0 | github-code | 6 | 21124445086 |
import linecache
import math
import os
# TODO notes:
# - rewrite with numpy and verify the results against this implementation
# - run multiple sets of 100 test cases
# - try cosine similarity as an alternative similarity measure
# - evaluate 100 test cases with the sum of squared errors and with the average
# - a small standard deviation indicates a good prediction; a smaller mean error means a better prediction
# - transpose the ratings matrix to reuse the user-based functions for the item-based case
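# Sketch of the numpy rewrite hinted at in the notes above (a hypothetical helper, not
# called by the code below): load the whole ratings matrix once, ignore the 99 "not rated"
# entries, and compute the mean-centred Pearson correlation between two users. For the
# item-based variant the same function can be fed the transposed matrix.
def numpy_pearson(ratings, a_idx, b_idx):
    import numpy as np
    a, b = ratings[a_idx], ratings[b_idx]          # rating rows, shape (n_items,)
    both = (a != 99) & (b != 99)                   # items rated by both users
    a_c = a[both] - a[a != 99].mean()              # centre on each user's own mean rating
    b_c = b[both] - b[b != 99].mean()
    return float(np.dot(a_c, b_c) / np.sqrt(np.dot(a_c, a_c) * np.dot(b_c, b_c)))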
FILESIZE = 24983
NUMJOKES = 100
FILENAME = "jester-data-1.csv"
FILENAMENNC = "nearestNeighborsC.csv"
FILENAMENNIB = "nearestNeighborsIB.csv"
def main():
print("Collaborative Average - User 500, Joke 50: " + str(collaborativeAverage(3, 3)))
print("Item Based Average - User 500, Joke 500: " + str(itemBasedAverage(3, 3)))
print("Collaborative Weighted Sum - User 500, Joke 50: " + str(collaborativeWeightedSum(3, 3)))
print("Item Based Weighted Sum - User 500, Joke 50: " + str(itemBasedWeightedSum(3, 3)))
print("Collaborative Adjusted Weighted Sum - User 500, Joke 50: " + str(collaborativeAdjustedWeightedSum(3, 3)))
print("Item Based Adjusted Weighted Sum - User 500, Joke 50: " + str(itemBasedAdjustedWeightedSum(3, 3)))
# print("Nearest Neighbors Collaborative Average - User 500, Joke 50, N 24982: " + str(nearestNeighborsCollaborativeAverage(499, 49, 24982)))
# print("Nearest Neighbors Collaborative Weighted Sum - User 500, Joke 50, N 24982: " + str(nearestNeighborsCollaborativeWeightedSum(499, 49, 24982)))
# print("Nearest Neighbors Collaborative Adjusted Weighted Sum - User 500, Joke 50, N 24982: " + str(nearestNeighborsCollaborativeAdjustedWeightedSum(499, 49, 24982)))
# print("Nearest Neighbors Item Based Average: " + str(nearestNeighborsItemBasedAverage(499, 49, 99)))
# print("Nearest Neighbors Item Based Weighted Sum: " + str(nearestNeighborsItemBasedWeightedSum(499, 49, 99)))
# print("Item Based Adjusted Weighted Sum - User 500, Joke 50: " + str(itemBasedAdjustedWeightedSum(3, 3)))
# print("Nearest Neighbors Item Based Adjusted Weighted Sum: " + str(nearestNeighborsItemBasedAdjustedWeightedSum(499, 49, 99)))
# print("Collaborative Pearson Correlation - User 24983, User 24982: " + str(collaborativePearsonCorrelation(FILESIZE - 1, FILESIZE - 2)))
# print("Item Based Pearson Correlation - Joke 99, Joke 100: " + str(itemBasedPearsonCorrelation(98, 99)))
# print("--------------------------------------------Test Cases ^^^-------------------------------------------------------------------")
# print("Collaborative Average - User 1, Joke 1: " + str(collaborativeAverage(0, 0)))
# print("Item based Average - User 1, Joke 1: " + str(itemBasedAverage(0, 0)))
# print("Collaborative Pearson Correlation - User 1, User 2: " + str(collaborativePearsonCorrelation(0, 1)))
# print("Item Based Pearson Correlation - Joke 1, Joke 2: " + str(itemBasedPearsonCorrelation(3, 3)))
# print("Collaborative Weighted Sum - User 1, Joke 1: " + str(collaborativeWeightedSum(0,0)))
# print("Collaborative Adjusted Weighted Sum - User 1, Joke 1: " + str(collaborativeAdjustedWeightedSum(0,0)))
# print("Item Based Weighted Sum - User 1, Joke 1: " + str(itemBasedWeightedSum(3,3)))
# print("Item Based Adjusted Weighted Sum - User 1, Joke 1: " + str(itemBasedAdjustedWeightedSum(0, 0)))
# print("Nearest Neighbors Collaborative Average - User 500, Joke 50, N 5: " + str(nearestNeighborsCollaborativeAverage(499, 49, 24982)))
# print("Nearest Neighbors Item Based Average: " + str(nearestNeighborsItemBasedWeightedSum(499, 49, 99)))
# given the user number and joke number, find all joke ratings at joke number except at row of user
def collaborativeAverage(userNumber, itemNumber, fileName = FILENAME, fileSize = FILESIZE):
currentUser = 0
count = 0
total = 0
for i in range(0, fileSize):
if currentUser != userNumber:
info = linecache.getline(fileName, i + 1).split(",")
rating = float(info[itemNumber + 1])
if rating != 99:
total += rating
count += 1
currentUser += 1
return total/count
def collaborativeWeightedSum(userNumber, itemNumber, fileName = FILENAME, fileSize = FILESIZE): # need to add appropriate params
normalizationSum = 0
compSum = 0
for i in range(0, fileSize):
if i != userNumber:
info = linecache.getline(fileName, i + 1).split(",")
utilityUserI = float(info[itemNumber + 1])
if utilityUserI != 99:
similarity = collaborativePearsonCorrelation(userNumber, i, fileName)
#print(similarity)
normalizationSum += abs(similarity)
compSum += (similarity * utilityUserI)
return compSum/normalizationSum
def collaborativeAdjustedWeightedSum(userNumber, itemNumber, fileName = FILENAME, fileSize = FILESIZE):
normalizationSum = 0
compSum = 0
for i in range(0, fileSize):
if i != userNumber:
info = linecache.getline(fileName, i + 1).split(",")
utilityUserI = float(info[itemNumber + 1])
if utilityUserI != 99:
similarity = collaborativePearsonCorrelation(userNumber, i, fileName)
#print(similarity)
normalizationSum += abs(similarity)
compSum += (similarity * (utilityUserI - itemBasedAverage(i, -1, fileName)))
return (itemBasedAverage(userNumber, -1, fileName) + (compSum/normalizationSum))
def collaborativePearsonCorrelation(user1Number, user2Number, fileName = FILENAME):
sumNumerator = 0
sumDenominatorUser1 = 0
sumDenominatorUser2 = 0
user1 = linecache.getline(fileName, user1Number + 1).split(",") # linecache indices start with 1
user2 = linecache.getline(fileName, user2Number + 1).split(",")
avgUser1 = itemBasedAverage(user1Number, -1, fileName) # -1 to ensure that it does not skip any joke
avgUser2 = itemBasedAverage(user2Number, -1, fileName)
#print(avgUser1, avgUser2)
for i in range(1, len(user1)):
utilityUser1 = float(user1[i])
utilityUser2 = float(user2[i])
if not (utilityUser1 == 99 or utilityUser2 == 99):
compUser1 = utilityUser1 - avgUser1
compUser2 = utilityUser2 - avgUser2
sumNumerator += compUser1 * compUser2
sumDenominatorUser1 += compUser1 ** 2
sumDenominatorUser2 += compUser2 ** 2
return sumNumerator / math.sqrt(sumDenominatorUser1 * sumDenominatorUser2)
def itemBasedAverage(userNumber, itemNumber, fileName = FILENAME):
total = 0
count = 0
line = linecache.getline(fileName, userNumber + 1)
info = line.split(",")
for i in range(1, len(info)):
if i != itemNumber + 1:
rating = float(info[i])
if rating != 99:
total += rating
count += 1
return total/(count)
def itemBasedWeightedSum(userNumber, itemNumber, fileName = FILENAME, fileSize = FILESIZE):
normalizationSum = 0
compSum = 0
info = linecache.getline(fileName, userNumber + 1).split(",")
for i in range(1, len(info)):
if i != itemNumber + 1:
utilityItemI = float(info[i])
if utilityItemI != 99:
similarity = itemBasedPearsonCorrelation(itemNumber, i - 1, fileName, fileSize)
#print(similarity)
normalizationSum += abs(similarity)
compSum += (similarity * utilityItemI)
return compSum/normalizationSum
def itemBasedAdjustedWeightedSum(userNumber, itemNumber, fileName = FILENAME, fileSize = FILESIZE):
normalizationSum = 0
compSum = 0
info = linecache.getline(fileName, userNumber + 1).split(",")
for i in range(1, len(info)):
if i != itemNumber + 1:
utilityItemI = float(info[i])
if utilityItemI != 99:
similarity = itemBasedPearsonCorrelation(itemNumber, i - 1, fileName, fileSize)
normalizationSum += abs(similarity)
compSum += (similarity * (utilityItemI - collaborativeAverage(-1, i - 1, fileName, fileSize)))
return (collaborativeAverage(-1, itemNumber) + (compSum/normalizationSum))
def itemBasedPearsonCorrelation(item1Number, item2Number, fileName = FILENAME, fileSize = FILESIZE):
sumNumerator = 0
sumDenominatorItem1 = 0
sumDenominatorItem2 = 0
avgItem1 = collaborativeAverage(-1, item1Number, fileName, fileSize) # -1 to ensure that it does not skip any user by
avgItem2 = collaborativeAverage(-1, item2Number, fileName, fileSize)
for i in range(0, fileSize):
line = linecache.getline(fileName, i + 1).split(",");
utilityItem1 = float(line[item1Number + 1])
utilityItem2 = float(line[item2Number + 1])
if not (utilityItem1 == 99 or utilityItem2 == 99): # if either user did not rate the joke, do not calculate
compItem1 = utilityItem1 - avgItem1
compItem2 = utilityItem2 - avgItem2
sumNumerator += compItem1 * compItem2
sumDenominatorItem1 += compItem1 ** 2
sumDenominatorItem2 += compItem2 ** 2
return sumNumerator / math.sqrt(sumDenominatorItem1 * sumDenominatorItem2)
def getNearestNeighborsCollaborative(userNumber, n):
nearestNeighbors = [[-2, -1] for i in range(n)]
for i in range(0, FILESIZE):
if i != userNumber:
info = linecache.getline(FILENAME, i + 1).split(",")
similarity = collaborativePearsonCorrelation(userNumber, i)
if similarity > nearestNeighbors[0][0]:
#print("Before assigning")
#print(nearestNeighbors)
nearestNeighbors[0][0] = similarity
#print("After assigning")
#print(nearestNeighbors)
nearestNeighbors[0][1] = i
#print("Before sorted: ")
#print(nearestNeighbors)
nearestNeighbors = sorted(nearestNeighbors, key=lambda x: x[0])
#print("After sorted: ")
#print(nearestNeighbors)
file = open(FILENAMENNC, 'w')
for j in range(0, n):
file.write(linecache.getline(FILENAME, nearestNeighbors[j][1] + 1))
file.write(linecache.getline(FILENAME, userNumber + 1))
def getNearestNeighborsItemBased(itemNumber, n):
nearestNeighbors = [[-2, -1] for i in range(n)]
for i in range(0, NUMJOKES):
if i != itemNumber:
similarity = itemBasedPearsonCorrelation(itemNumber, i);
if similarity > nearestNeighbors[0][0]:
nearestNeighbors[0][0] = similarity
nearestNeighbors[0][1] = i
nearestNeighbors = sorted(nearestNeighbors, key=lambda x: x[0])
file = open(FILENAMENNIB, 'w')
for i in range(0, FILESIZE):
line = linecache.getline(FILENAME, i+1).rstrip()
info = line.split(',')
count = NUMJOKES - (info[1:]).count(str(99))
lineOut = ""
for j in range(0, n):
lineOut += info[nearestNeighbors[j][1]] + ","
lineOut += info[itemNumber]
lineOut = str(count) + "," + lineOut + "\n"
file.write(lineOut)
def nearestNeighborsCollaborativeAverage(userNumber, itemNumber, n):
getNearestNeighborsCollaborative(userNumber, n)
average = collaborativeAverage(n, itemNumber, FILENAMENNC, n + 1)
deleteFile(FILENAMENNC)
return average
def nearestNeighborsCollaborativeWeightedSum(userNumber, itemNumber, n):
getNearestNeighborsCollaborative(userNumber, n)
weightedSum = collaborativeWeightedSum(n, itemNumber, FILENAMENNC, n + 1)
deleteFile(FILENAMENNC)
return weightedSum
def nearestNeighborsCollaborativeAdjustedWeightedSum(userNumber, itemNumber, n):
getNearestNeighborsCollaborative(userNumber, n)
adjustedWeightedSum = collaborativeAdjustedWeightedSum(n, itemNumber, FILENAMENNC, n + 1)
deleteFile(FILENAMENNC)
return adjustedWeightedSum
def nearestNeighborsItemBasedAverage(userNumber, itemNumber, n):
getNearestNeighborsItemBased(itemNumber, n)
average = itemBasedAverage(userNumber, n, FILENAMENNIB)
deleteFile(FILENAMENNIB)
return average
def nearestNeighborsItemBasedWeightedSum(userNumber, itemNumber, n):
getNearestNeighborsItemBased(itemNumber, n)
weightedSum = itemBasedWeightedSum(userNumber, n, FILENAMENNIB)
deleteFile(FILENAMENNIB)
return weightedSum
def nearestNeighborsItemBasedAdjustedWeightedSum(userNumber, itemNumber, n):
getNearestNeighborsItemBased(itemNumber, n)
adjustedWeightedSum = itemBasedAdjustedWeightedSum(userNumber, n, FILENAMENNIB, FILESIZE)
deleteFile(FILENAMENNIB)
return adjustedWeightedSum
def deleteFile(fileName):
try:
os.remove(fileName)
except:
pass
if __name__ == "__main__":
main()
| R3xKyle-CP/cpe400 | jester/prediction2.py | prediction2.py | py | 12,781 | python | en | code | 0 | github-code | 6 | 10649208417 |
"""
Example
Description: Showcasing the use of Example Images from 'Images.py'
"""
from IMAGES import *
import pygame,sys
pygame.init()
w,h = (1920,1080)
win = pygame.display.set_mode([w,h])
img1 = Rock((255,255,255),(0,0))
img2 = Testing((255,255,255),(0,0))
while True:
for e in pygame.event.get():
if e.type == pygame.QUIT:
sys.exit()
win.fill((125,125,125))
win.blit(img1.img,(0,0))
win.blit(img2.img,(0,0))
pygame.display.flip()
| LandenTy/GeometricEngine | CustomTexturer/Example Images/Example.py | Example.py | py | 487 | python | en | code | 0 | github-code | 6 | 74874759867 |
import sys
import os
import argparse
from pypiscout.SCout_Logger import Logger as sc
import gprof2dot # pylint: disable=unused-import
# Rationale: Not directly used, but later we do a sys-call wich needs the library. This is needed to inform the user to install the package.
sys.path.append("../")
# pylint: disable=wrong-import-position
# Rationale: This module needs to access modules that are above them in the folder structure.
from Emma.shared_libs.stringConstants import * # pylint: disable=unused-wildcard-import,wildcard-import
import Emma.shared_libs.emma_helper
import genDoc._genCallGraphs
import genDoc._genUmlDiagrams
def ParseArguments():
"""
Argument parser
:return: argparse object containing the parsed options
"""
parser = argparse.ArgumentParser(
prog="Emma - Call graph generator",
description="Script to generate call graphs that can be used in the documentation or to examine the run of Emma and the Emma Visualiser.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--graphviz_bin_folder",
help=r"The bin subfolder of the Graphviz software. Example: c:\Program Files (x86)\Graphviz2.38\bin",
required=False
)
parser.add_argument(
"--verbose",
help="Prints out more info during run.",
default=False
)
parser.add_argument(
"--no_graphs",
help="Do not update graphs (UML + call graph)",
action="store_true",
default=False
)
return parser.parse_args()
def main(arguments):
"""
Main function.
:param arguments: Processed command line arguments.
:return: None
"""
sc(invVerbosity=-1, actionWarning=lambda: sys.exit(-10), actionError=lambda: sys.exit(-10))
sc().header("Generating the Readme documents", symbol="/")
# Give a hint on python sys-call
sc().info("A `python` system call is going to happen. If any errors occur please check the following first:")
if sys.platform == "win32":
sc().info("Windows OS detected. Make sure `python` refers to the Python3 version targeted for this application (-> dependencies; e.g. WSL comes with its own Python).\n")
else:
sc().info("Make sure `python` refers to a Python 3 installation.\n")
# Store original path variables
pathOldValue = os.environ["PATH"]
if not("Graphviz" in os.environ["PATH"] or "graphviz" in os.environ["PATH"]):
if arguments.graphviz_bin_folder is not None:
graphvizBinAbspath = os.path.abspath(arguments.graphviz_bin_folder)
# Add to path
os.environ["PATH"] += (graphvizBinAbspath + ";")
else:
sc().error("The \"graphviz_bin_folder\" was not found in PATH nor was given in the argument --graphviz_bin_folder")
try:
outPath = os.path.abspath(Emma.shared_libs.emma_helper.joinPath("..", README_CALL_GRAPH_AND_UML_PATH))
if not os.path.isdir(outPath):
sc().info("The folder \"" + outPath + "\" was created because it did not exist...")
os.makedirs(README_CALL_GRAPH_AND_UML_PATH)
if not arguments.no_graphs:
# pylint: disable=protected-access
# Rationale: These modules are private so that the users will not use them directly. They are meant to be used trough this script.
genDoc._genCallGraphs.main(arguments)
genDoc._genUmlDiagrams.main()
sc().info("Storing Emma readme as a .html file...")
markdownFilePath = r"../doc/readme-emma.md"
Emma.shared_libs.emma_helper.convertMarkdownFileToHtmlFile(Emma.shared_libs.emma_helper.joinPath(os.path.dirname(__file__), markdownFilePath), (os.path.splitext(markdownFilePath)[0] + ".html"))
sc().info("Done.\n")
sc().info("Storing Emma Visualiser readme as a .html file...")
markdownFilePath = r"../doc/readme-vis.md"
Emma.shared_libs.emma_helper.convertMarkdownFileToHtmlFile(Emma.shared_libs.emma_helper.joinPath(os.path.dirname(__file__), markdownFilePath), (os.path.splitext(markdownFilePath)[0] + ".html"))
sc().info("Done.\n")
sc().info("Storing Emma contribution as a .html file...")
markdownFilePath = r"../doc/contribution.md"
Emma.shared_libs.emma_helper.convertMarkdownFileToHtmlFile(Emma.shared_libs.emma_helper.joinPath(os.path.dirname(__file__), markdownFilePath), (os.path.splitext(markdownFilePath)[0] + ".html"))
sc().info("Done.\n")
sc().info("Storing the test_project readme as a .html file...")
markdownFilePath = r"../doc/test_project/readme.md"
Emma.shared_libs.emma_helper.convertMarkdownFileToHtmlFile(Emma.shared_libs.emma_helper.joinPath(os.path.dirname(__file__), markdownFilePath), (os.path.splitext(markdownFilePath)[0] + ".html"))
sc().info("Done.\n")
sc().info("Storing the top level README as a .html file...")
# Change the working directory; otherwise we get errors about the relative image import paths in emma_helper.changePictureLinksToEmbeddingInHtmlData()
os.chdir("..")
markdownFilePath = r"../README.md"
Emma.shared_libs.emma_helper.convertMarkdownFileToHtmlFile(Emma.shared_libs.emma_helper.joinPath(os.path.dirname(__file__), markdownFilePath), (os.path.splitext(markdownFilePath)[0] + ".html"))
sc().info("Done.")
os.chdir("doc") # Change working directory back
except Exception as exception: # pylint: disable=broad-except
# Rationale: We are not trying to catch a specific exception type here.
# The purpose of this is, that the PATH environment variable will be set back in case of an error.
sc().error("An exception was caught:", exception)
# Get back initial path config
os.environ["PATH"] = pathOldValue
if __name__ == "__main__":
main(ParseArguments())
| bmwcarit/Emma | genDoc/genReadmeHtmlFromMd.py | genReadmeHtmlFromMd.py | py | 6,000 | python | en | code | 2 | github-code | 6 | 34673770578 |
import typed_args as ta
from typing import List, Callable
@ta.argument_parser()
class Args(ta.TypedArgs):
"""
Process some integers.
"""
integers: List[int] = ta.add_argument(
metavar='N', type=int, nargs='+',
# help='an integer for the accumulator'
)
"""
an integer for the accumulator
"""
accumulate: Callable[[List[int]], int] = ta.add_argument(
'--sum',
action='store_const',
const=sum, default=max,
help='sum the integers (default: find the max)'
)
args = Args.parse_args()
print(args.accumulate(args.integers))
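# Hypothetical invocation of the example above (file name assumed):
#     python prog.py 1 2 3 4          -> prints 4  (default accumulator is max)
#     python prog.py 1 2 3 4 --sum    -> prints 10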
| SunDoge/typed-args | examples/prog.py | prog.py | py | 611 | python | en | code | 11 | github-code | 6 | 6296479069 |
turn = ''
shown_turn = ''
winner= ''
check_if_win = False
check_if_tie = False
board = ["-", "-", "-",
"-", "-", "-",
"-", "-", "-"]
def Play_game():
    while check_if_win == False and check_if_tie == False:
check_trun = False
print_board = board[0] + " | " + board[1] + " | " + board[2] +"\n"+\
board[3] + " | " + board[4] + " | " + board[5] +"\n"+\
board[6] + " | " + board[7] + " | " + board[8]
print(print_board)
Check_Winner()
check_tie()
        if shown_turn in ('X', 'O') and check_if_win == False:
print("It's " + shown_turn+ " turn")
        if winner in ('X', 'O') and check_if_win == True:
print("Yay " + winner + " won.")
break
elif check_if_tie == True:
print("Tie :(")
break
Player = input("Enter a number between 1 to 9")
while Player not in ["1","2","3","4","5","6","7","8","9"]:
Player= input("only numbers between 1 to 9")
while check_trun ==False:
if board[int(Player) -1 ] == '-':
check_trun = True
board[int(Player) - 1] = Switch_Turn(turn)
else:
print("Used Space try again")
break
def Switch_Turn(turn1):
global turn
global shown_turn
if turn1 == 'X':
turn = 'O'
shown_turn = 'X'
return 'O'
else:
turn = 'X'
shown_turn = 'O'
return 'X'
def Check_Winner():
global winner
global check_if_win
#Rows --
if board[0] == board[1] == board[2] != "-":
winner = board[0]
check_if_win = True
return
elif board[3] == board[4] == board[5] != "-":
winner= board[3]
check_if_win = True
return
elif board[6] == board[7] == board[8] != "-":
winner = board[6]
check_if_win = True
return
#Col --
if board[0] == board[3] == board[6] != "-":
winner = board[0]
check_if_win = True
return
elif board[1] == board[4] == board[7] != "-":
winner = board[1]
check_if_win = True
return
elif board[2] == board[5] == board[8] != "-":
winner = board[2]
check_if_win = True
return
#digno --
if board[0] == board[4] == board[8] != "-":
winner = board[0]
check_if_win = True
return
elif board[2] == board[4] == board[6] != "-":
winner = board[2]
check_if_win = True
return
else:
return
def check_tie():
global check_if_tie
for i in board:
if i == '-':
return
check_if_tie = True
Play_game()
| SackBiscuit/TicTacToe | ExOo.py | ExOo.py | py | 2,783 | python | en | code | 0 | github-code | 6 | 7919695077 |
# Task: create a list of N elements filled with numbers from [-N, N]. Find the product of the
# elements at the specified positions. The positions are stored in the list positions - create
# this list yourself (for example: positions = [1, 3, 6]).
# Variant 1
n = int(input("Введите число n:"))
positions = [1, 3, 6]
x = max(positions)
y = int(len(positions))
my_str = []
for i in range(-n, n+1):
my_str.append(i)
total = int(1)
if len(my_str) <= x:
print(f'Необходимо ввести значение больше ')
else:
for i in range(1, y+1):
total *= my_str[positions[i-1]]
print(total)
# Variant 2
def new_spisok(n, n1, n2, n3):
res = []
poz = [n1,n2,n3]
result = 1
for i in range(-n, n+1):
res.append(i)
maxindex = len(res)-1
minindex = len(res)
for n in poz:
if n > maxindex or n < -minindex:
return print("Неверное указали значения индекса")
result *= res[n]
return print (f'В списке {res}, произведение значений с индексами {n1},{n2},{n3} составляет {result}')
number = int(input("Введите число n:"))
poz1 = int(input("Введите индекс первого числа:"))
poz2 = int(input("Введите индекс первого числа:"))
poz3 = int(input("Введите индекс первого числа:"))
new_spisok(number, poz1, poz2, poz3)
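# A compact third variant (sketch; requires Python 3.8+ for math.prod and is not called
# above): build the list once and multiply the values at the given positions.
def product_at_positions(n, positions):
    from math import prod
    values = list(range(-n, n + 1))                 # the list [-n, ..., n]
    if any(not -len(values) <= p < len(values) for p in positions):
        return None                                 # an index is out of range
    return prod(values[p] for p in positions)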
| Savitskiyov/Python | Seminar 2/DZ_4.py | DZ_4.py | py | 1,593 | python | ru | code | 0 | github-code | 6 | 71484286908 |
import sys
import ctypes
def popcount(N):
if sys.platform.startswith('linux'):
libc = ctypes.cdll.LoadLibrary('libc.so.6')
return libc.__sched_cpucount(ctypes.sizeof(ctypes.c_long), (ctypes.c_long * 1)(N))
elif sys.platform == 'darwin':
libc = ctypes.cdll.LoadLibrary('libSystem.dylib')
return libc.__popcountdi2(N)
else:
assert(False)
S = input()
N = len(S)
ans = N
for state in range(1 << N):
cand = 0
used = [False] * N
for i in range(N):
if state >> i & 1:
for j in range(N):
if S[j] == "o":
used[(i + j) % N] = True
if sum(used) == N:
ans = min(ans, popcount(state))
print(ans)
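# Note (not part of the original submission): the ctypes-based popcount above is platform
# specific; a portable pure-Python equivalent simply counts the set bits in the binary string.
def popcount_portable(n):
    return bin(n).count("1")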
| knuu/competitive-programming | atcoder/arc/arc007_c.py | arc007_c.py | py | 719 | python | en | code | 1 | github-code | 6 | 34970808449 |
from main import speak, sing
import unittest
class TestMain(unittest.TestCase):
def test_speak(self):
INP = EXP ='OK'
OUT = speak(INP)
assert OUT == EXP, 'It must speak exactly what I asked'
def test_sing(self):
INP = EXP = 'Lala land'
OUT = sing(INP)
assert OUT == EXP, 'It must sing exactly what I asked'
if __name__ == '__main__':
unittest.main()
| nhtua/log2mongo | tests/test_main.py | test_main.py | py | 416 | python | en | code | 0 | github-code | 6 | 21446386978 |
import csv
from pylab import *
import sys
if len(sys.argv) != 4:
print('Error, ejecucion: python estadisticas.py <topico> <fecha_ini(yyyy-mm-dd)> <fecha_fin(yyyy-mm-dd)>')
print('Para omitir un parametro dejar el valor \'\'')
exit()
topico = str(sys.argv[1]) #str(input("ingrese topico: "))
date_ini = str(sys.argv[2]) #str(input("fecha de inicio (yyyy-mm-dd): "))
date_fin = str(sys.argv[3]) #str(input("fecha de fin (yyyy-mm-dd): "))
fecha_ini = int(date_ini.replace("-", "")+'000000')
fecha_fin = int(date_fin.replace("-", "")+'235959')
data_csv = open('tweets_nyctsubway.csv', 'r', encoding='latin1')
data = csv.reader(data_csv, delimiter=';', quotechar='\"')
comp = []
neg = []
neu = []
pos = []
a = 0
for row in data:
if a == 0:
a += 1
continue
tweet_date = int(row[2])
    if topico != '' and (fecha_fin == 235959 or fecha_ini == 0):  # a date was omitted
if topico == row[1]:
comp.append(float(row[3]))
neg.append(float(row[4]))
neu.append(float(row[5]))
pos.append(float(row[6]))
    elif topico != '' and fecha_fin != 235959 and fecha_ini != 0:
if topico == row[1] and tweet_date <= fecha_fin and tweet_date >= fecha_ini:
comp.append(float(row[3]))
neg.append(float(row[4]))
neu.append(float(row[5]))
pos.append(float(row[6]))
    elif topico == '' and fecha_fin != 235959 and fecha_ini != 0:
if tweet_date <= fecha_fin and tweet_date >= fecha_ini:
comp.append(float(row[3]))
neg.append(float(row[4]))
neu.append(float(row[5]))
pos.append(float(row[6]))
if len(pos) == 0:
print("No hay datos para los parametros usados.")
exit()
else:
    print('Analyzing', len(pos), 'tweets...')
    print('-> Positivity', sum(pos)/len(pos))
    print('-> Negativity', sum(neg)/len(neg))
    print('-> Polarity', sum(comp)/len(comp))
tot_pos = sum(pos)
tot_neg = sum(neg)
tot_neu = sum(neu)
ax = axes([0, 0, 0.9, 0.9])
labels = 'Positive', 'Negative', 'Neutral'
fracs = [tot_pos, tot_neg, tot_neu]
pie(fracs, labels=labels, autopct='%10.0f%%', shadow=True)
legend()
title('Tweet statistics', bbox={'facecolor': '0.8', 'pad': 5})
show()
|
gonzalezf/tweetometro
|
estadisticas.py
|
estadisticas.py
|
py
| 2,282 |
python
|
es
|
code
| 1 |
github-code
|
6
|
40725138206
|
from turtle import Turtle, Screen
import random
screen = Screen()
screen.setup(width=500, height=400)
colors = ["red", "orange", "yellow", "green", "blue", "purple"]
user_bet = ""
while user_bet not in colors:
user_bet = screen.textinput(title="Make your bet", prompt="Which turtle will win the race? Enter a color: ")
is_race_on = False
turtles = []
y = -100
for i in range(0, 6):
new_turtle = Turtle(shape="turtle")
new_turtle.pu()
new_turtle.color(colors[i])
new_turtle.goto(x=-235, y=y)
turtles.append(new_turtle)
y += 40
if user_bet:
is_race_on = True
while is_race_on:
for turtle in turtles:
if turtle.xcor() > 230:
is_race_on = False
winning_color = turtle.pencolor()
if winning_color == user_bet:
print("You have won!")
else:
print("You have lost.")
print(f"The {winning_color} color turtle is the winner!")
else:
rand_distance = random.randint(0, 10)
turtle.forward(rand_distance)
screen.exitonclick()
|
abishekbalaji/hundred_days_of_code_python
|
turtle-race/main.py
|
main.py
|
py
| 1,085 |
python
|
en
|
code
| 1 |
github-code
|
6
|
13488129623
|
#!/usr/bin/python3
"""Returns the number of lines of a text file"""
def read_lines(filename="", nb_lines=0):
"""Reads a text file and return the number of lines"""
lines = 0
with open(filename) as a_file:
i = len(list(a_file))
if nb_lines >= i or nb_lines <= 0:
nb_lines = i
a_file.seek(0, 0)
for l in range(nb_lines):
print(a_file.readline(), end="")
|
1uiscalderon/holbertonschool-higher_level_programming
|
0x0B-python-input_output/2-read_lines.py
|
2-read_lines.py
|
py
| 422 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1909503331
|
import unittest
import mock
from hpOneView.connection import connection
from hpOneView.resources.networking.sas_logical_interconnects import SasLogicalInterconnects
from hpOneView.resources.resource import ResourceClient
class SasLogicalInterconnectsTest(unittest.TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self._client = SasLogicalInterconnects(self.connection)
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all_called_once(self, mock_get_all):
filter = 'name=TestName'
sort = 'name:ascending'
self._client.get_all(2, 500, filter, sort)
mock_get_all.assert_called_once_with(count=500, fields='name=TestName', filter='name:ascending', query='',
sort='', start=2, view='')
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all_called_once_with_default(self, mock_get_all):
self._client.get_all()
mock_get_all.assert_called_once_with(count=-1, fields='', filter='', query='', sort='', start=0, view='')
@mock.patch.object(ResourceClient, 'get')
def test_get_by_id_called_once(self, mock_get):
logical_interconnect_id = "f0a0a113-ec97-41b4-83ce-d7c92b900e7c"
self._client.get(logical_interconnect_id)
mock_get.assert_called_once_with(logical_interconnect_id)
@mock.patch.object(ResourceClient, 'get')
def test_get_by_uri_called_once(self, mock_get):
logical_interconnect_uri = "/rest/sas-logical-interconnects/f0a0a113-ec97-41b4-83ce-d7c92b900e7c"
self._client.get(logical_interconnect_uri)
mock_get.assert_called_once_with(logical_interconnect_uri)
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_called_once(self, mock_get):
self._client.get_by("name", "value")
mock_get.assert_called_once_with("name", "value")
@mock.patch.object(ResourceClient, 'create')
def test_replace_drive_enclosure_called_once(self, mock_create):
drive_replacement = {
"oldSerialNumber": "SN1100",
"newSerialNumber": "SN1101"
}
self._client.replace_drive_enclosure(drive_replacement, "ad28cf21-8b15-4f92-bdcf-51cb2042db32")
mock_create.assert_called_once_with(
drive_replacement.copy(),
'/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32/replaceDriveEnclosure')
@mock.patch.object(ResourceClient, 'update')
def test_update_compliance_all_called_once(self, mock_update):
compliance_uris = {
"uris": [
"/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32"
]}
self._client.update_compliance_all(compliance_uris)
mock_update.assert_called_once_with(compliance_uris.copy(),
'/rest/sas-logical-interconnects/compliance',
timeout=-1)
@mock.patch.object(ResourceClient, 'update_with_zero_body')
def test_update_compliance_by_uri(self, mock_update_with_zero_body):
logical_interconnect_uri = '/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32'
self._client.update_compliance(logical_interconnect_uri)
mock_update_with_zero_body.assert_called_once_with(
'/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32/compliance', timeout=-1)
@mock.patch.object(ResourceClient, 'update_with_zero_body')
def test_update_compliance_by_id(self, mock_update_with_zero_body):
mock_update_with_zero_body.return_value = {}
logical_interconnect_id = 'ad28cf21-8b15-4f92-bdcf-51cb2042db32'
self._client.update_compliance(logical_interconnect_id)
mock_update_with_zero_body.assert_called_once_with(
'/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32/compliance', timeout=-1)
@mock.patch.object(ResourceClient, 'update_with_zero_body')
def test_update_configuration_by_uri(self, mock_update_with_zero_body):
logical_interconnect_uri = '/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32'
self._client.update_configuration(logical_interconnect_uri)
mock_update_with_zero_body.assert_called_once_with(
'/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32/configuration')
@mock.patch.object(ResourceClient, 'update_with_zero_body')
def test_update_configuration_by_id(self, mock_update_with_zero_body):
logical_interconnect_id = 'ad28cf21-8b15-4f92-bdcf-51cb2042db32'
self._client.update_configuration(logical_interconnect_id)
mock_update_with_zero_body.assert_called_once_with(
'/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32/configuration')
@mock.patch.object(ResourceClient, 'get')
def test_get_firmware(self, mock_get):
logical_interconnect_id = '3518be0e-17c1-4189-8f81-83f3724f6155'
logical_interconnect_uri = "/rest/sas-logical-interconnects/" + logical_interconnect_id
expected_uri = logical_interconnect_uri + "/firmware"
self._client.get_firmware(logical_interconnect_id)
mock_get.assert_called_once_with(expected_uri)
@mock.patch.object(ResourceClient, 'update')
def test_update_firmware(self, mock_update):
logical_interconnect_id = '3518be0e-17c1-4189-8f81-83f3724f6155'
fake_firmware = dict(
command="Update",
sppUri="/rest/firmware-drivers/Service_0Pack_0for_0ProLiant"
)
logical_interconnect_uri = "/rest/sas-logical-interconnects/" + logical_interconnect_id
expected_uri = logical_interconnect_uri + "/firmware"
self._client.update_firmware(fake_firmware, logical_interconnect_id)
mock_update.assert_called_once_with(fake_firmware, expected_uri)
|
HewlettPackard/python-hpOneView
|
tests/unit/resources/networking/test_sas_logical_interconnects.py
|
test_sas_logical_interconnects.py
|
py
| 5,962 |
python
|
en
|
code
| 86 |
github-code
|
6
|
32072532623
|
class Player:
def __init__(self, name, id, pos, switch, order, sub, sub_id, status, team):
self.name = name
self.id = id
self.pos = pos
self.switch = switch
self.order = order
self.sub = sub
self.sub_id = sub_id
self.status = status # 'available', 'entered', 'removed'
self.team = team
self.pbp_name = None
|
milesokamoto/pbpy
|
modules/player.py
|
player.py
|
py
| 391 |
python
|
en
|
code
| 5 |
github-code
|
6
|
21917306491
|
from django.shortcuts import render, redirect
from .models import *
from hashlib import sha1
from django.http import JsonResponse, HttpResponseRedirect
from . import user_decorator
from df_goods.models import *
def register(request):
return render(request, 'df_user/register.html')
def register_handle(request):
    # Read the user input
post = request.POST
uname = post.get('user_name')
upwd = post.get('pwd')
upwd2 = post.get('cpwd')
uemail = post.get('email')
    # Check that the two passwords match
if upwd != upwd2:
return redirect('/user/register/')
    # Hash the password
s1 = sha1()
s1.update(upwd.encode("utf8"))
upwd3 = s1.hexdigest()
    # Create the user object
user = UserInfo()
user.uname = uname
user.upwd = upwd3
user.uemail = uemail
user.save()
    # Registration succeeded, go to the login page
context = {'title': '用户登录', 'uname': uname, 'page_name': 1}
return render(request, 'df_user/login.html', context)
def register_exist(request):
uname = request.GET.get('uname')
print(uname)
count = UserInfo.objects.filter(uname=uname).count()
print(count)
return JsonResponse({'count': count})
def login(request):
uname = request.COOKIES.get('uname', '')
context = {'title': '用户登录', 'error_name': 0, 'error_pwd': 0, "uname": uname, 'page_name': 1}
return render(request, 'df_user/login.html', context)
def login_handle(request):
    # Read the request data
post = request.POST
uname = post.get('username')
upwd = post.get('pwd')
remember_name = post.get('remember_name', 0)
    # Look up the user by username
users = UserInfo.objects.filter(uname=uname) # []
print(uname)
    # If no user is found the username is wrong; otherwise check the password and, if correct, go to the user center
if len(users) == 1:
s1 = sha1()
s1.update(upwd.encode("utf8"))
if s1.hexdigest() == users[0].upwd:
url = request.COOKIES.get('url', '/')
red = HttpResponseRedirect(url)
            # Remember the username
if remember_name != 0:
red.set_cookie('uname', uname)
else:
                red.set_cookie('uname', '', max_age=-1)  # max_age is the expiry time
request.session['user_id'] = users[0].id
request.session['user_name'] = uname
return red
else:
context = {'title': '用户登录', 'error_name': 0, 'error_pwd': 1, 'uname': uname, 'upwd': upwd, 'page_name': 1}
return render(request, 'df_user/login.html', context)
else:
context = {'title': '用户登录', 'error_name': 1, 'error_pwd': 0, 'uname': uname, 'upwd': upwd, 'page_name': 1}
return render(request, 'df_user/login.html', context)
def logout(request):
request.session.clear()
return HttpResponseRedirect('/')
@user_decorator.login
def info(request):
user_email = UserInfo.objects.get(id=request.session['user_id']).uemail
    # Recently viewed goods
goods_ids = request.COOKIES.get('goods_ids', '')
goods_ids1 = goods_ids.split(',')
goods_list = []
for goods_id in goods_ids1:
goods_list.append(GoodsInfo.objects.get(id=int(goods_id)))
context = {'title': '用户中心',
'user_email': user_email,
'user_name': request.session['user_name'],
'page_name': 1,
'goods_list': goods_list,
}
return render(request, 'df_user/user_center_info.html', context)
@user_decorator.login
def order(request):
context = {'title': '订单中心', 'page_name': 1}
return render(request, 'df_user/user_center_order.html', context)
@user_decorator.login
def site(request):
user = UserInfo.objects.get(id=request.session['user_id'])
print(user.id)
if request.method == 'POST':
post = request.POST
user.urece = post.get('urece')
user.uaddress = post.get('uaddress')
user.uzip = post.get('uzip')
user.uphone = post.get('uphone')
user.save()
context = {'title': '收货地址', 'user': user, 'page_name': 1}
return render(request, 'df_user/user_center_site.html', context)
|
junjie0825/dailyfresh
|
dailyfresh/df_user/views.py
|
views.py
|
py
| 4,210 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14177953222
|
"""Holds environmental variables, sets up custom logger."""
import logging
import os
log = logging.getLogger(name="log")
# declare environment constants
COSMOSDB_CONNECTION_STRING: str = os.environ["COSMOSDB_CONNECTION_STRING"]
COSMOSDB_DATABASE_ID: str = os.environ["COSMOSDB_DATABASE_ID"]
COSMOSDB_CONTAINER_ID: str = os.environ["COSMOSDB_CONTAINER_ID"]
BLOB_SERVICE_CONNECTION_STRING: str = os.environ["BLOB_CONNECTION_STRING"]
BLOB_CONTAINER_NAME: str = os.environ["BLOB_CONTAINER_NAME"]
def logger(
logging_format: str = "%(levelname)s, %(name)s.%(funcName)s: %(message)s",
level: int = logging.INFO,
) -> None:
"""
Sets up custom logger.
Parameters:
        logging_format (str, optional): Logging format. Defaults to "%(levelname)s, %(name)s.%(funcName)s: %(message)s".
level (int, optional): Logging level. Defaults to logging.INFO.
Returns:
None
"""
log.debug(msg="Setting up custom logger.")
log.setLevel(level=level)
handler = logging.StreamHandler(stream=None)
formatter = logging.Formatter(fmt=logging_format)
handler.setFormatter(fmt=formatter)
if log.hasHandlers():
log.handlers.clear()
log.addHandler(handler)
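# Example usage (added sketch, not part of the original module): call logger() once at
# start-up, e.g.
#     logger(level=logging.DEBUG)
#     log.info("custom logger configured")
# after which every module that fetches logging.getLogger(name="log") shares the handler.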
|
wieczorekgrzegorz/ksef-krportal-communication
|
utilities/setup.py
|
setup.py
|
py
| 1,194 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30925855450
|
from Game import Game
from Player import Player
from Players import Players
print('::::: PROBLEM 1 :::::')
player_1 = Player( 1 , "Kirat Boli" , [5,30,25,10,15,1,9,5] )
player_2 = Player( 2 , "N.S Nodhi" , [10,40,20,5,10,1,4,10] )
player_3 = Player( 3 , "R Rumrah" , [20,30,15,5,5,1,4,20] )
player_4 = Player( 4 , "Shashi Henra" , [30,25,5,0,5,1,4,30] )
players_list = [player_1,player_2,player_3,player_4]
players = Players(players_list,"Lengaburu")
problem_1 = Game(players,40,24)
problem_1.runGame()
problem_1.printGameSummary()
print('::::: PROBLEM 2 :::::')
player_1_1 = Player( 1 , "Kirat Boli" , [5,10,25,10,25,1,14,10] )
player_2_1 = Player( 2 , "N.S Nodhi" , [5,15,15,10,20,1,19,15] )
player_1_2 = Player( 3 , "DB Vellyers" , [5,10,25,10,25,1,14,10] )
player_2_2 = Player( 4 , "H Mamla" , [10,15,15,10,20,1,19,15] )
players_list_1 = [player_1_1,player_2_1]
players_list_2 = [player_1_2,player_2_2]
players_1 = Players(players_list_1,"Lengaburu")
players_2 = Players(players_list_2,"Enchai")
problem_2_1 = Game(players_1,999,12)
problem_2_1.runGame(False)
problem_2_2 = Game(players_2,problem_2_1.players.team_score,12)
problem_2_2.runGame()
problem_2_2.printGameSummary()
|
jonafrank13/python_example
|
GameSimulator.py
|
GameSimulator.py
|
py
| 1,264 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27672728231
|
from typing import Optional
import torch
from torch import nn
from config import dt_config
from block import TransformerBlock
class DecisionTransformer(nn.Module):
def __init__(self,
cfg: dt_config,
state_dim: int,
action_dim: int) -> None:
super().__init__()
self.embedding_dropout = nn.Dropout(cfg.embedding_dropout)
self.embedding_norm = nn.LayerNorm(cfg.embedding_dim)
self.final_norm = nn.LayerNorm(cfg.embedding_dim)
self.positional_encoding = nn.Embedding(cfg.episode_length + cfg.sequence_length,
cfg.embedding_dim)
self.state_embedding = nn.Linear(state_dim, cfg.embedding_dim)
self.action_embedding = nn.Linear(action_dim, cfg.embedding_dim)
self.return_embedding = nn.Linear(1, cfg.embedding_dim)
self.blocks = nn.ModuleList([
TransformerBlock(3 * cfg.sequence_length,
cfg.embedding_dim,
cfg.num_heads,
cfg.attention_dropout,
cfg.residual_dropout) for _ in range(cfg.num_layers)
])
self.embedding_dim = cfg.embedding_dim
self.sequence_length = cfg.sequence_length
self.state_dim = state_dim
self.action_dim = action_dim
self.episode_length = cfg.episode_length
self.max_action = cfg.max_action
self.action_head = nn.Sequential(
nn.Linear(self.embedding_dim, self.action_dim),
nn.Tanh()
)
self.apply(self.reset_weights)
@staticmethod
def reset_weights(m: nn.Module):
if isinstance(m, (nn.Linear, nn.Embedding)):
nn.init.normal_(m.weight, mean=0.0, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.zeros_(m.bias)
if isinstance(m, nn.LayerNorm):
nn.init.zeros_(m.bias)
nn.init.ones_(m.weight)
def forward(self,
states: torch.Tensor,
actions: torch.Tensor,
mc_returns: torch.Tensor,
time_steps: torch.Tensor,
key_padding_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
batch_size, sequence_length = states.shape[0], states.shape[1]
pos_encoding = self.positional_encoding(time_steps)
state_embedding = self.state_embedding(states) + pos_encoding
action_embedding = self.action_embedding(actions) + pos_encoding
returns_embedding = self.return_embedding(mc_returns.unsqueeze(-1)) + pos_encoding
sequence = torch.stack((
returns_embedding, state_embedding, action_embedding
), dim=1).permute(0, 2, 1, 3).reshape(batch_size, 3 * sequence_length, self.embedding_dim)
if key_padding_mask is not None:
key_padding_mask = torch.stack((
key_padding_mask, key_padding_mask, key_padding_mask
), dim=1).permute(0, 2, 1).reshape(batch_size, 3 * sequence_length)
out = self.embedding_dropout(self.embedding_norm(sequence))
for block in self.blocks:
out = block(out, padding_mask=key_padding_mask)
out = self.final_norm(out)
return self.action_head(out[:, 1::3]) * self.max_action
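# Added note on the sequence layout in forward() (an explanatory sketch, not original code):
# stacking (returns, states, actions) on dim=1 and then permuting/reshaping yields the
# interleaved token order (R_1, s_1, a_1, R_2, s_2, a_2, ...), so out[:, 1::3] picks the
# transformer outputs at the state-token positions, from which the next action is predicted.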
|
zzmtsvv/rl_task
|
decision_transformer/model.py
|
model.py
|
py
| 3,401 |
python
|
en
|
code
| 8 |
github-code
|
6
|
25289541604
|
from os import mkdir, getcwd
from os.path import join,exists
def new_file(file_name, data):
"""
create new file
"""
with open(file_name, 'w', encoding='utf-8') as file:
file.write(data)
def new_static_dir(project_name):
"""
root_dir/static
"""
static_path = join(getcwd(), project_name, 'static')
if exists(static_path):
return
mkdir(static_path)
def new_static_readme(project_name):
"""
root_dir/static/README.md
"""
content = [
f'# {project_name} Docs',
'\n',
        ' File storage folder',
        ' - For algorithm models, download the data and model files into this folder',
        ' - For container services, this folder is mounted into the container and used for persistence'
]
readme_file = join(getcwd(), project_name, 'static', 'README.md')
data = '\n'.join(content)
new_file(readme_file, data)
def new_static(project_name):
"""
Export static
"""
new_static_dir(project_name)
new_static_readme(project_name)
|
LABELNET/my-ctl
|
my_ctl/py_template/app_dir_static.py
|
app_dir_static.py
|
py
| 1,037 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23158389057
|
import os
import argparse
import torch
from torchvision import datasets, transforms
from torch.utils.data.sampler import WeightedRandomSampler
from sklearn.model_selection import train_test_split
from collections import Counter
import numpy as np
# Transform and to normalize the data [0.0, 1.0]
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
transform_test = transforms.Compose([transforms.ToTensor()])
# Cifar10 classes
cifar10_classes = ('airplane 0', 'automobile 1', 'bird 2', 'cat 3',
'deer 4', 'dog 5', 'frog 6', 'horse 7', 'ship 8', 'truck 9')
def to_numpy(tensor):
'''To convert the torch tensor into numpy
'''
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
def index_to_label(index_dict):
    '''To create a new dict by replacing the keys (class-indexes) with class-labels
'''
new_dict = {}
for key in range(len(index_dict)):
new_dict[cifar10_classes[key]] = index_dict[key]
return new_dict
def prepareTrainset(args, X_train, y_train):
'''
    *** Usage:
# prepare the training set where we limit no of samples in some classes
# Rest of the classes will take all available samples
*** parameters:
args:
args.classes_to_limit: Classes where we need less samples, [2,4,9]-> ['bird', 'deer', 'truck']
args.data_limit_in_classes: samples limit, 2400
X_train: Original training data(images)
y_train: Original training targets(classes)
*** Return
training set with desired no of samples in each class
'''
X_train = np.rollaxis(X_train, 3, 1)
X_train = (X_train/255.0)
X_train = X_train.astype(np.float32)
train_idx = []
for i in range(10):
indexes = [idx for idx in range(len(y_train)) if y_train[idx] == i]
if i in args.classes_to_limit:
indexes = indexes[:args.data_limit_in_classes]
train_idx.extend(indexes)
else:
train_idx.extend(indexes)
trainset = [(X_train[i], y_train[i]) for i in train_idx]
if args.verbose:
y_train = [y_train[id] for id in train_idx]
print(f'\nTraining dataset: \n{len(y_train)}\n{index_to_label(dict(Counter(y_train)))}')
return trainset
def prepareValset(args, X_val, y_val):
'''Prepare validation set with 1,000 samples where each class has 100 samples
'''
X_val = np.rollaxis(X_val, 3, 1)
X_val = (X_val/255.0)
X_val = X_val.astype(np.float32)
valset = [(X_val[i], y_val[i]) for i in range(len(X_val))]
# Verbose
if args.verbose:
print(f'\nValidation dataset: \n{len(y_val)}\n{index_to_label(dict(Counter(y_val)))}')
return valset
def train_data_sampler(args, y_train):
''' Sampling strategy for the training batches
Weighted over sampling: Building a multinomial distribution over the set of observations
where each observation behaves as its own class with a controlled probability of being drawn
'''
train_idx = []
for i in range(10):
indexes = [idx for idx in range(len(y_train)) if y_train[idx] == i]
if i in args.classes_to_limit:
indexes = indexes[:args.data_limit_in_classes]
train_idx.extend(indexes)
else:
train_idx.extend(indexes)
train_targets = [y_train[i] for i in train_idx]
class_sample_count = np.unique(train_targets, return_counts=True)[1]
weight = 1. / class_sample_count
samples_weight = weight[train_targets]
samples_weight = torch.from_numpy(samples_weight)
sampler = WeightedRandomSampler(samples_weight, num_samples=len(samples_weight), replacement=False)
return sampler
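# Small illustration of the weighting above (added sketch, numbers are hypothetical):
# if the kept training targets contain, say, 4900 samples of class 0 and 2450 of class 2,
# np.unique(..., return_counts=True)[1] returns those counts and weight = 1. / counts
# gives each class-2 sample twice the draw probability of a class-0 sample, so the
# WeightedRandomSampler favours the under-represented classes when ordering an epoch.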
def loadCIFAR10(args):
    ''' Preparing the training, val, test data loaders
# Training set : `args.classes_to_limit` classes will have `args.data_limit_in_classes` samples, other classes will have 4900 samples
# Validation set: 1,000 samples (making sure that 100 images are in each class)
# Test set : 10,000 (By default 1000 images are in each class)
'''
if args.verbose:
print('\n***** CIFAR-10 DATASET')
# path to save CIFAR10 data
path = f'{os.path.dirname(os.path.dirname(__file__))}/data'
# Download and load the CIFAR10 dataset
train_val_set = datasets.CIFAR10(path, download = True, train = True, transform = transform_train)
testset = datasets.CIFAR10(path, download = True, train = False, transform = transform_test)
# Divide the CIFAR10 training samples into training and validation set
# Training set : 49,000 samples
# Validation set : 1,000 samples (making sure that 100 images are in each class)
X_train, X_val, y_train, y_val = train_test_split(train_val_set.data, train_val_set.targets, test_size=0.02, train_size=0.98, stratify=train_val_set.targets, shuffle=True, random_state=42)
trainset = prepareTrainset(args, X_train, y_train)
valset = prepareValset(args, X_val, y_val)
# Train, Val, Test Dataset Loaders
    if args.data_sampling is None:
trainLoader = torch.utils.data.DataLoader(trainset, batch_size = args.batch_size, shuffle=True)
elif args.data_sampling == 'weightedOverSampling':
# Weighted Oversampler for trainLoader
train_sampler = train_data_sampler(args, y_train)
trainLoader = torch.utils.data.DataLoader(trainset, batch_size = args.batch_size, sampler=train_sampler)
valLoader = torch.utils.data.DataLoader(valset, batch_size = args.batch_size, shuffle = True)
testLoader = torch.utils.data.DataLoader(testset, batch_size = args.batch_size, shuffle = True)
if args.verbose:
print(f'\nTest dataset: \n{len(testset.targets)}\n{index_to_label(dict(Counter(testset.targets)))}')
return trainLoader, valLoader, testLoader
def loadCIFAR10_testset(batch_size = 100):
# path to save CIFAR10 data
path = f'{os.path.dirname(os.path.dirname(__file__))}/data'
# Download and load the testset
testset = datasets.CIFAR10(path, download = True, train = False, transform = transform_test)
testLoader = torch.utils.data.DataLoader(testset, batch_size = batch_size, shuffle = True)
return testLoader
# To check and visualize dataset independently
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Dataset_loader')
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--classes_to_limit', default=[2, 4, 9], choices=[i for i in range(10)])
parser.add_argument('--data_limit_in_classes', default=2450, type=int)
parser.add_argument('--verbose', default=True, type=bool)
parser.add_argument('--visualize_a_batch', default=True, type=bool)
parser.add_argument('--data_sampling', default='weightedOverSampling', type=str,
choices=['weightedOverSampling', None],
help='Data sampling to tackle imbalanced dataset')
args = parser.parse_args()
# Cifar10-dataset data loaders
trainLoader, valLoader, testLoader = loadCIFAR10(args)
if args.visualize_a_batch:
print('\n***** Visualize a batch')
dataiter = iter(trainLoader)
        images, labels = next(dataiter)  # next(iterator) works across PyTorch versions
print(images.shape, labels.shape)
print(f'Pixel Values are B/W: [{torch.min(images).item()}, {torch.max(images).item()}]')
print('\n***** Visualize some batches to see class distributions after applying weighted data over sampling')
class_distribution = []
for i, data in enumerate(trainLoader):
_, labels = data
class_distribution.append(np.unique(labels, return_counts=True)[1])
print(class_distribution[i])
if i > 9:
break
print('\n**** class-wise average distribution in batches after applying weighted data over sampling')
class_distribution = np.array(class_distribution)
class_distribution_avg = np.average(class_distribution, axis=0)
print(f'{np.round(class_distribution_avg, decimals=2)}\n')
|
minkeshtu/Imbalanced-Cifar-10-classification
|
dataset/cifar10Loader.py
|
cifar10Loader.py
|
py
| 8,218 |
python
|
en
|
code
| 3 |
github-code
|
6
|
3407362691
|
from queue import Queue
from adjacencyset import *
def sort_topology(graph):
queue = Queue()
in_degree_map = {}
for v in range(graph.numVertices):
in_degree_map[v] = graph.get_indegree(v)
if in_degree_map[v] == 0:
queue.put(v)
sorted = []
while not queue.empty():
v = queue.get()
sorted.append(v)
for _v in graph.get_adjacent_vertices(v):
in_degree_map[_v] = in_degree_map[_v] - 1
if in_degree_map[_v] == 0:
queue.put(_v)
if len(sorted) != graph.numVertices:
raise ValueError("Graph is a cyclic")
print(sorted)
a = AdjacencyGraphSet(9, True)
a.add_edge(0,1)
a.add_edge(1,2)
a.add_edge(2,7)
a.add_edge(2,4)
a.add_edge(2,3)
a.add_edge(1,5)
a.add_edge(5,6)
a.add_edge(3,6)
a.add_edge(3,4)
a.add_edge(6,8)
sort_topology(a)
|
VimleshS/python-graph-ds
|
topological_sort.py
|
topological_sort.py
|
py
| 860 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25328118850
|
import cv2
import numpy as np
from scipy.spatial import cKDTree
from sklearn.decomposition import PCA
def sample_to_heatmap(points, x_adjust=0, y_adjust=0, z_threshold=None, nearest_k=3):
# If a threshold is provided, keep only points with a z-coordinate above this threshold
if z_threshold is not None:
points = points[points[:, 2] > z_threshold]
print('cropped')
# Compute PCA
pca = PCA(n_components=3)
pca.fit(points)
# The normal of the plane is the smallest principal component
normal = pca.components_[-1]
# The point on the plane can be the centroid of the point cloud
centroid = np.mean(points, axis=0)
# Now we can print the plane equation
# The plane equation is of the form ax + by + cz + d = 0
a, b, c = normal
d = -centroid.dot(normal)
print(f"The equation of the plane is {a:.5f}x + {b:.5f}y + {c:.5f}z + {d:.5f} = 0")
# Get x, y, z coordinates
x_coords = points[:, 0]
y_coords = points[:, 1]
z_coords = points[:, 2]
# Calculate minimum and maximum values in x and y directions
x_min, x_max = np.min(x_coords), np.max(x_coords)
y_min, y_max = np.min(y_coords), np.max(y_coords)
x_mid = (x_min + x_max) / 2 + x_adjust
y_mid = (y_min + y_max) / 2 + y_adjust
# The range of x and y values for the mesh grid
x_range = np.linspace(x_mid - 15, x_mid + 15, 514)
y_range = np.linspace(y_mid - 15, y_mid + 15, 514)
x, y = np.meshgrid(x_range, y_range)
# Compute corresponding z values for the plane
z = (-a * x - b * y - d) / c
tree = cKDTree(points)
distances = []
for point in np.vstack([x.flatten(), y.flatten(), z.flatten()]).T:
# Find the three nearest points in the point cloud
dists, idxs = tree.query(point, k=nearest_k)
nearest_points = points[idxs]
# For each nearest point, compute the distance to the point along the normal direction
ds = []
for nearest_point in nearest_points:
displacement = nearest_point - point # vector from point to nearest_point
distance = np.dot(displacement, normal) # project displacement onto normal
ds.append(distance)
distances.append(sum(ds) / len(ds))
    # Normalise the distance matrix against its minimum value here
distances_array = (np.array(distances) - np.min(distances)) / 0.5 * 255
distances_reshape = distances_array.reshape((514, 514))[1:513, 1:513].astype(int)
    return distances_reshape  # this is the image; it can be saved directly
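# Added note on the plane fit above: the normal is the unit principal component with the
# smallest variance and d = -centroid . normal, so the centroid satisfies
# a*x + b*y + c*z + d = 0 exactly, and each value appended to `distances` is the signed
# point-to-plane distance obtained by projecting (nearest_point - grid_point) onto that normal.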
# Create a heatmap using seaborn
def plot_heatmap(heatmap, save_path=None):
cv2.imwrite(save_path, heatmap)
|
jichengzhi/cube-sampling
|
heatmap.py
|
heatmap.py
|
py
| 2,657 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6969826416
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import torch
class EMA_FM():
def __init__(self, decay=0.9, first_decay=0.0, channel_size=512, f_map_size=196, is_use = False):
self.decay = decay
self.first_decay = first_decay
self.is_use = is_use
self.shadow = {}
self.epsional = 1e-5
if is_use:
self._register(channel_size=channel_size, f_map_size= f_map_size)
def _register(self, channel_size=512, f_map_size=196):
Init_FM = torch.zeros((f_map_size, channel_size),dtype=torch.float)
self.shadow['FM'] = Init_FM.cuda().clone()
self.is_first = True
def update(self, input):
B, C, _ = input.size()
if not(self.is_use):
return torch.ones((C,C), dtype=torch.float)
decay = self.first_decay if self.is_first else self.decay
####### FEATURE SIMILARITY MATRIX EMA ########
# Mu = torch.mean(input,dim=0)
self.shadow['FM'] = (1.0 - decay) * torch.mean(input,dim=0) + decay * self.shadow['FM']
self.is_first = False
return self.shadow['FM']
class Cluster_loss():
def __init__(self):
pass
def update(self, correlation, loss_mask_num, loss_mask_den, labels):
batch, channel, _ = correlation.shape
c, _, _ = loss_mask_num.shape
if labels is not None:
label_mask = (1 - labels).view(batch, 1, 1)
## smg_loss if only available for positive sample
correlation = correlation * label_mask
correlation = (correlation / batch).view(1, batch, channel, channel).repeat(c, 1, 1, 1)
new_Num = torch.sum(correlation * loss_mask_num.view(c, 1, channel, channel).repeat(1, batch, 1, 1),
dim=(1, 2, 3))
new_Den = torch.sum(correlation * (loss_mask_den).view(c, 1, channel, channel).repeat(1, batch, 1, 1),
dim=(1, 2, 3))
ret_loss = -torch.sum(new_Num / (new_Den + 1e-5))
return ret_loss
class Multiclass_loss():
def __init__(self, class_num=None):
self.class_num = class_num
def get_label_mask(self, label):
label = label.cpu().numpy()
sz = label.shape[0]
label_mask_num = []
label_mask_den = []
for i in range(self.class_num):
idx = np.where(label == i)[0]
cur_mask_num = np.zeros((sz, sz))
cur_mask_den = np.zeros((sz, sz))
for j in idx:
cur_mask_num[j][idx] = 1
cur_mask_den[j][:] = 1
label_mask_num.append(np.expand_dims(cur_mask_num, 0))
label_mask_den.append(np.expand_dims(cur_mask_den, 0))
label_mask_num = np.concatenate(label_mask_num, axis=0)
label_mask_den = np.concatenate(label_mask_den, axis=0)
return torch.from_numpy(label_mask_num).float().cuda(), torch.from_numpy(label_mask_den).float().cuda()
def update(self, fmap, loss_mask_num, label):
B, C, _, _ = fmap.shape
center, _, _ = loss_mask_num.shape
fmap = fmap.view(1, B, C, -1).repeat(center, 1, 1, 1)
mean_activate = torch.mean(torch.matmul(loss_mask_num.view(center, 1, C, C).repeat(1, B, 1, 1), fmap),
dim=(2, 3))
# cosine
mean_activate = torch.div(mean_activate, torch.norm(mean_activate, p=2, dim=0, keepdim=True) + 1e-5)
inner_dot = torch.matmul(mean_activate.permute(1, 0), mean_activate).view(-1, B, B).repeat(self.class_num, 1, 1)
label_mask, label_mask_intra = self.get_label_mask(label)
new_Num = torch.mean(inner_dot * label_mask, dim=(1, 2))
new_Den = torch.mean(inner_dot * label_mask_intra, dim=(1, 2))
ret_loss = -torch.sum(new_Num / (new_Den + 1e-5))
return ret_loss
def Cal_Center(fmap, gt):
f_1map = fmap.detach().cpu().numpy()
matrix = gt.detach().cpu().numpy()
B, C, H, W = f_1map.shape
cluster = []
visited = np.zeros(C)
for i in range(matrix.shape[0]):
tmp = []
if(visited[i]==0):
for j in range(matrix.shape[1]):
if(matrix[i][j]==1 ):
tmp.append(j)
visited[j]=1;
cluster.append(tmp)
center = []
for i in range(len(cluster)):
cur_clustet_fmap = f_1map[:,cluster[i],...]
cluster_center = np.mean(cur_clustet_fmap,axis=1)
center.append(cluster_center)
center = np.transpose(np.array(center),[1,0,2,3])
center = torch.from_numpy(center).float()
return center
|
ada-shen/icCNN
|
utils/utils.py
|
utils.py
|
py
| 4,582 |
python
|
en
|
code
| 18 |
github-code
|
6
|
20801228142
|
from itertools import chain
y, x = [int(i) for i in input().split()]
matrix = [[] for _ in range(y)]
for i in range(y):
s = input()
for u in s:
if u == ".":
matrix[i].append(True)
elif u == "#":
matrix[i].append(False)
def step(y, x, matrix):
matrix[y][x] = True
tmp = [[y, x+1],[y, x-1],[y-1, x],[y+1, x]]
neighbors = []
    for i in tmp:
        # explicit bounds check: a bare try/except would let negative indexes wrap around
        if 0 <= i[0] < len(matrix) and 0 <= i[1] < len(matrix[i[0]]):
            if matrix[i[0]][i[1]] == False:
                neighbors.append(i)
if len(neighbors) == 0:
return
else:
for i in neighbors:
step(i[0], i[1], matrix)
count = 0
while False in chain.from_iterable(matrix):
coord = []
for i in matrix:
if False in i:
coord = [matrix.index(i), i.index(False)]
step(coord[0], coord[1], matrix)
count += 1
print(count)
|
michbogos/olymp
|
eolymp/dynamic_programming/cut_paper.py
|
cut_paper.py
|
py
| 915 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21705447470
|
from glob import glob
from os import makedirs
from shutil import copy2
from tqdm import tqdm
SINGER = 'mixed'
RELEASE_DIR = 'release/mixed_---'
PATH_QUESTION = 'conf/jp_qst001_nnsvs_simple_4-4_mix.hed'
NAME_EXPERIMENT = 'simple_4-4_mix'
def copy_question(path_question, release_dir):
"""
hedファイル(question)をコピー
"""
makedirs(f'{release_dir}/conf', exist_ok=True)
print('copying question')
copy2(path_question, f'{release_dir}/{path_question}')
def copy_scaler(singer, release_dir):
"""
dumpフォルダにあるファイルをコピー
"""
makedirs(f'{release_dir}/dump/{singer}/norm', exist_ok=True)
list_path_scaler = glob(f'dump/{singer}/norm/*_scaler.joblib')
print('copying scaler')
for path_scaler in tqdm(list_path_scaler):
copy2(path_scaler, f'{release_dir}/{path_scaler}')
def copy_model(singer, name_exp, release_dir):
"""
name_exp: 試験のID
"""
name_exp = singer + '_' + name_exp
makedirs(f'{release_dir}/exp/{name_exp}/acoustic', exist_ok=True)
makedirs(f'{release_dir}/exp/{name_exp}/duration', exist_ok=True)
makedirs(f'{release_dir}/exp/{name_exp}/timelag', exist_ok=True)
list_path_model = glob(f'exp/{name_exp}/*/best_loss.pth')
list_path_model += glob(f'exp/{name_exp}/*/latest.pth')
list_path_model += glob(f'exp/{name_exp}/*/model.yaml')
print('copying model')
for path_model in tqdm(list_path_model):
copy2(path_model, f'{release_dir}/{path_model}')
def main():
"""
各種ファイルをコピーする
"""
copy_question(PATH_QUESTION, RELEASE_DIR)
copy_scaler(SINGER, RELEASE_DIR)
copy_model(SINGER, NAME_EXPERIMENT, RELEASE_DIR)
if __name__ == '__main__':
main()
|
oatsu-gh/nnsvs_mixed_db
|
recipe/00-svs-world/make_it_for_release.py
|
make_it_for_release.py
|
py
| 1,761 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25200222730
|
#For example, given [(30, 75), (0, 50), (60, 150)], you should return 2.
classes = [(900, 910), (940, 1200), (950, 1120),(1100, 1130), (1500, 1900), (1800, 2000)]
started = []
for x,i in classes:
started+= [(x,'started'),(i,'ended')]
started = sorted(started)
needed = 0
ongoing = 0
print(started)
for i in range(len(started)):
if started[i][1] == 'started':
ongoing +=1
#print(started[i][0])
if ongoing > needed:
needed = ongoing
else:
ongoing -=1
print (needed)
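# Added sketch: the same sweep wrapped in a reusable function and checked against the
# example from the comment at the top ([(30, 75), (0, 50), (60, 150)] -> 2). The names
# below are illustrative and not part of the original script.
def min_rooms(intervals):
    events = []
    for start, end in intervals:
        events += [(start, 'started'), (end, 'ended')]
    # 'ended' sorts before 'started', so a class ending at time t frees its room
    # for another class starting at the same time t
    events.sort()
    needed = ongoing = 0
    for _, kind in events:
        if kind == 'started':
            ongoing += 1
            needed = max(needed, ongoing)
        else:
            ongoing -= 1
    return needed

assert min_rooms([(30, 75), (0, 50), (60, 150)]) == 2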
|
etukleris/various
|
python/timers.py
|
timers.py
|
py
| 540 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24966970253
|
#!/usr/bin/env python
from netCDF4 import Dataset
import copyNCVariable as copync
import sys, os
import random
import pdb
import numpy as np
import datetime as dt
#
#
#
def usage():
print("Usage")
print(" "+sys.argv[0]+" [filename] [dim name]")
exit(1)
def change_time_units(var):
"""Change the time unit from epoch time to hours since 1800"""
century18 = dt.datetime(1800,1,1,0)
#for i,j in enumerate(var[:]):
# date = dt.datetime.utcfromtimestamp(j)
# seconds = (date - century18).total_seconds()
# hours = int( seconds / 60 / 60 )
# var[i] = hours
def change_unit(date):
date = dt.datetime.utcfromtimestamp(date)
seconds = (date - century18).total_seconds()
hours = int( seconds / 60 / 60 )
return hours
vfunc = np.vectorize(change_unit)
new_data = vfunc(var[:])
var[:] = new_data
setattr(var, 'standard_name', "time")
setattr(var, 'long_name', "time")
setattr(var, "units","hours since 1800-01-01 00:00:00.0")
setattr(var, "calendar", "proleptic_gregorian")
return var
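# Worked example for the conversion above (added illustration, values computed by hand):
# epoch time 0 is 1970-01-01 00:00 UTC, which lies 62091 days after 1800-01-01,
# i.e. 62091 * 24 = 1490184 in the "hours since 1800-01-01 00:00:00.0" units.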
def add_utc_date(nc, time_var):
""" Adds human readable date variable.
Assumes date is in seconds since epoch.
time_var is netCDF.Variable object.
"""
# Create Variable
utc = nc.createVariable('utc_time', int, ('time'))
setattr(utc, 'standard_name', "time")
setattr(utc, 'long_name', "UTC date yyyy-mm-dd hh:00:00 as yyyymmddhh")
setattr(utc, "units","Gregorian_year month day hour")
toUTC = lambda d: int(dt.datetime.fromtimestamp(d).strftime('%Y%m%d%H'))
vfunc = np.vectorize(toUTC)
utc_data = vfunc(time_var[:])
utc[:] = utc_data
def find_variables_with_dimension(nc, dim_name):
selected_vars = []
for var_name in nc.variables:
var = nc.variables[var_name]
if dim_name in var.dimensions:
selected_vars.append(var)
return selected_vars
def find_variables_without_dimension(nc, dim_name):
selected_vars = []
for var_name in nc.variables:
var = nc.variables[var_name]
if dim_name not in var.dimensions:
selected_vars.append(var)
return selected_vars
def check_if_reduce_needed(vars_to_modify):
"""Return True if variable has missing start and end"""
for var in vars_to_modify:
if len(var.dimensions) > 2 and var[0,0,:].mask.all() and \
var[-1,1,:,:].mask.all():
return True
return False
def add_time_bounds(nc, varname):
"""
Adds a time bounds variable to variable.
Assumes time dimension is called 'time'
"""
THREE_HOURS = 60*60*3 # in seconds
bnds_name = 'time_bnds'
bounds_dim = 'nv'
# Create bounds dimension
nc.createDimension(bounds_dim, 2)
# Get variable matching varname
time_var = nc.variables['time']
time_var.setncattr('bounds', bnds_name)
time_data = time_var[:]
time_length = len(time_data)
# reshape time data
bounds_data = np.dstack((time_data,time_data)).reshape(time_length,2)
for i in bounds_data:
i[0] = i[0] - (THREE_HOURS)
bounds_var = nc.createVariable(bnds_name, time_var.dtype, ('time', bounds_dim), fill_value=9999)
bounds_var[:] = bounds_data
def add_cell_methods(nc):
methods = {
'avg' : 'mean',
'accum' : 'sum',
'min' : 'minimum',
'max' : 'maximum'
}
step_str = 'GRIB_stepType'
for i in nc.variables:
var = nc.variables[i]
if step_str in var.ncattrs() and 'instant' not in var.getncattr(step_str):
if 'cell_methods' in var.ncattrs():
cur_str = var.getncattr('cell_methods')
var.setncattr('cell_methods', cur_str + " time: " + methods[var.getncattr(step_str)])
else:
pass
#var.setncattr('cell_methods', "time: " + methods[var.getncattr(step_str)])
def change_coordinates(nc):
for i in nc.variables:
var = nc.variables[i]
if 'coordinates' in var.ncattrs():
coord_str = var.getncattr('coordinates')
coord_str = coord_str.replace('valid_time', '')
coord_str = coord_str.replace('step', '')
if 'time' not in coord_str:
coord_str += " time"
coord_str = ' '.join(coord_str.split())
var.setncattr('coordinates', coord_str)
def remove_dimension(nc, dim_name, outfile=None):
vars_to_modify = find_variables_with_dimension(nc, dim_name)
vars_to_copy = find_variables_without_dimension(nc, dim_name)
reduce_needed = check_if_reduce_needed(vars_to_modify)
if outfile is None:
outfile = 'tmp' + str(random.randint(1,10000)) + '.nc'
tmp_nc = Dataset(outfile, 'w')
# First copy global attrs
copync.copy_global_attrs(nc, tmp_nc)
# Then copy dimensions minus unwanted
copync.copy_dimensions(nc, tmp_nc, ignore=['time',dim_name])
if 'step' in nc.dimensions:
if reduce_needed:
tmp_nc.createDimension('time', (nc.dimensions['time'].size * nc.dimensions['step'].size) - 2)
else:
tmp_nc.createDimension('time', nc.dimensions['time'].size * nc.dimensions['step'].size )
else:
tmp_nc.createDimension('time', nc.dimensions['time'].size)
if len(vars_to_modify) == 0: # not in dimensions, but need to get rid of step vars
err_str = "'" + dim_name + "' is not in any of the variables."
#raise Exception(err_str)
time_var = None
valid_var = None
for var in vars_to_copy:
if var.name != 'time' and var.name != 'step' and var.name != 'valid_time':
copync.copy_variable(nc, tmp_nc, var.name)
elif var.name == 'time':
time_var = var
elif var.name == 'valid_time':
valid_var = var
new_var = tmp_nc.createVariable('time', valid_var.dtype, ('time',))
copync.copy_var_attrs(valid_var, new_var)
new_var[:] = valid_var[:]
return (outfile, tmp_nc)
# Next, copy unchanged vars
time_var = None
for var in vars_to_copy:
if var.name != 'time':
copync.copy_variable(nc, tmp_nc, var.name)
else:
time_var = var
for var in vars_to_modify:
# If described by only unwanted dimension, then remove variable.
if len(var.dimensions) == 1:
# Remove variable
pass
else:
# find dim index
dims = var.dimensions
dims_list = list(dims)
shape = var.shape
shape_list = list(shape)
idx = dims.index(dim_name)
if idx == 0:
print('Need to implement')
print('Exiting.')
exit(1)
size = shape_list.pop(idx)
dims_list.pop(idx)
dims = tuple(dims_list)
shape_list[idx-1] = shape_list[idx-1]*size
new_data = var[:].reshape(*shape_list)
if reduce_needed:
if len(dims) == 1:
new_data = new_data[1:-1]
elif len(dims) > 1:
new_data = new_data[1:-1,:,:]
varname = var.name
if varname == 'valid_time':
varname = 'time'
new_var = tmp_nc.createVariable(varname, var.dtype, dims)
copync.copy_var_attrs(var, new_var)
new_var[:] = new_data
step_str = 'GRIB_stepType'
            if step_str in new_var.ncattrs() and new_var.getncattr(step_str) != 'instant':
add_time_bounds(tmp_nc, new_var.name)
return (outfile, tmp_nc)
def change_fill_value(nc, fill_value):
"""Changes fill value for all variables in file"""
outfile = 'tmp' + str(random.randint(1,100000)) + '.nc'
out_nc = copync.copy_dimensions(nc, outfile)
copync.copy_variables(nc, out_nc, new_fill_value=fill_value)
out_nc.close()
return outfile
if __name__ == '__main__':
if len(sys.argv) <= 2:
usage()
outfile = None
nc_file = sys.argv[1]
dim_name = sys.argv[2]
nc = Dataset(nc_file, 'r+')
if dim_name != "none":
outfile,nc = remove_dimension(nc, dim_name)
add_cell_methods(nc)
change_coordinates(nc)
add_utc_date(nc, nc.variables['time'])
change_time_units(nc.variables['time'])
if 'time_bnds' in nc.variables:
change_time_units(nc.variables['time_bnds'])
second_outfile = change_fill_value(nc, 9999)
nc.close()
if outfile is not None:
os.remove(outfile)
os.rename(second_outfile, nc_file)
|
NCAR/rda-dataset-curation
|
common/removeDimension.py
|
removeDimension.py
|
py
| 8,646 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74118290427
|
import os
import sys
import yt_dlp
from tqdm import tqdm
def download_videos(links_file):
if not os.path.exists(links_file):
print("Error: The links file '{}' does not exist.".format(links_file))
return
ydl_opts = {
'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best',
'outtmpl': '%(autonumber)s - %(title)s.%(ext)s',
'merge_output_format': 'mp4',
}
with open(links_file, 'r') as f:
video_links = f.read().splitlines()
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
for link in video_links:
try:
info_dict = ydl.extract_info(link, download=True)
print("\n{}: Download completed!".format(info_dict['title']))
except Exception as e:
print("\nError downloading video: {}".format(e))
continue
if __name__ == "__main__":
links_file = "links.txt"
download_videos(links_file)
|
vishnu012/Personal-Scripts
|
pydownload/downloader.py
|
downloader.py
|
py
| 961 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27104562564
|
import os
import pickle
import uvicorn
from fastapi import FastAPI
FAKE_HASH_TABLE_DB = './database/FakeHashTable.pickle'
class FakeHashTable:
def __init__(self, bit_limitation=10):
self.limitation = 2 ** bit_limitation
self.hashtable = dict()
self.id_list = set()
self.history = list()
self.avail_id = list(range(self.limitation))
def hash(self, value, replacement=None):
"""
:param value: value to hash
:param replacement: if replacement = 'oldest' this instance will replace the object by the oldest record. If
replacement is the value that existed in hashtable, it will remove old record and replace by new value.
:return:
"""
# For user replace ID by a new value
if replacement is not None:
if replacement == 'oldest' and self.history.__len__() > 2:
old_id = self.hashtable[self.history[0]]
del self.hashtable[self.history[0]]
self.history = self.history[1:]
self.history.append(value)
self.hashtable[value] = old_id
if replacement in self.hashtable:
old_id = self.hashtable[replacement]
                self.history.remove(replacement)  # history tracks values, not ids
self.history.append(value)
del self.hashtable[replacement]
self.hashtable[value] = old_id
return old_id
return None
if value in list(self.hashtable.keys()):
return self.hashtable[value]
# If larger than 10 bit, return None
if self.hashtable.items().__len__() > self.limitation:
return None
# Add new ID
new_id = self.avail_id.pop(0)
self.history.append(value)
self.id_list.add(new_id)
self.hashtable[value] = new_id
return new_id
    def remove(self, value):
        if value not in self.hashtable:
            return False
        old_id = self.hashtable[value]
        del self.hashtable[value]
        self.id_list.remove(old_id)  # id_list holds ids, so free the id rather than the value
        self.avail_id.append(old_id)
        self.history.remove(value)
        return True
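# Example of how the table is meant to be used (added sketch; the returned ids assume a
# fresh instance with the default 10-bit limit and hypothetical machine names):
#     htb = FakeHashTable()
#     htb.hash('machine-a')                            # -> 0, first free id
#     htb.hash('machine-b')                            # -> 1
#     htb.hash('machine-a')                            # -> 0 again, value already known
#     htb.hash('machine-c', replacement='machine-a')   # -> 0, 'machine-a' hands its id over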
def backup_htb_object(in_htb):
with open(FAKE_HASH_TABLE_DB, 'wb') as ff:
pickle.dump(in_htb, ff)
def load_htb_object():
with open(FAKE_HASH_TABLE_DB, 'rb') as ff:
data = pickle.load(ff)
return data
app = FastAPI()
if os.path.exists(FAKE_HASH_TABLE_DB):
htb = load_htb_object()
else:
htb = FakeHashTable()
@app.post("/")
async def get_machine_id(value):
iid = htb.hash(value)
backup_htb_object(htb)
return {"id": iid}
if __name__ == '__main__':
uvicorn.run(
"hash_service:app",
host='0.0.0.0',
port=8000,
reload=True,
debug=True,
workers=3
)
|
hoangperry/system-design-implementation
|
unique-id-generator/hash_service.py
|
hash_service.py
|
py
| 2,812 |
python
|
en
|
code
| 2 |
github-code
|
6
|
73510589947
|
n = int(input())
arr = list(map(int,input().split()))
left = 0
right = n-1
wube = 0
henock = 0
flag = True
while left<=right:
if flag:
if arr[left]>arr[right]:
wube += arr[left]
left += 1
else:
wube += arr[right]
right -= 1
flag = False
elif not flag:
if arr[left]>arr[right]:
henock += arr[left]
left += 1
else:
henock += arr[right]
right -= 1
flag = True
print(wube,henock)
|
yonaSisay/a2sv-competitive-programming
|
cardGame.py
|
cardGame.py
|
py
| 529 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2374031473
|
from torchvision import models
import torch
import torch.nn as nn
from PIL import ImageGrab
import cv2
import torch.nn.functional as F
# import albumentations as A
# from albumentations.pytorch import ToTensorV2
from torchvision import transforms
import numpy as np
from PIL import Image
from input_keys import PressKey, ReleaseKey
import time
#import torch.nn.functional as F
#output = torch.randn(10, 5) # example output tensor
#softmax_result = F.softmax(output, dim=1)
labels = {0: 'a', 1: 'w', 2: 'd'}
#labels = {0: 'a', 1: 'w', 2: 'd', 3: 's'}
def ingame_predic():
test_transform = transforms.Compose(
[
# A.SmallestMaxSize(max_size=160),
transforms.Resize((640, 480)),
# A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
transforms.ToTensor()
]
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#net = models.mobilenet_v3_large(pretrained=True, weights=models.mobilenet_v3_large(pretrained=True).weights.IMAGENET1K_V1)
net = models.mobilenet_v3_large(pretrained=True)
net.classifier[3] = nn.Linear(in_features=1280,out_features=3)
#net = models.efficientnet_b4(pretrained=True)
#net.classifier[1] = nn.Linear(in_features=1792,out_features=4)
#net.load_state_dict(torch.load('./mbmodel.pt', map_location=device))
net.load_state_dict(torch.load('./mbv3model.pt', map_location=device))
net.to(device)
net.eval()
while(True):
with torch.no_grad():
            screen = ImageGrab.grab(bbox=(0, 40, 1024, 768))  # grab the 1024x768 screen and convert it to an array later
# screen = cv2.imread('./test_image2.jpg') # test image
# input_image = Image.fromarray(screen)
input_image = test_transform(screen).unsqueeze(0).to(device)
output = net(input_image)
            softmax_result = F.softmax(output, dim=1)
top_prob, top_label = torch.topk(softmax_result, 1)
prob = round(top_prob.item() * 100, 2)
label = labels.get(int(top_label))
# print(f'prob: {prob}, label: {label}')
W = 0x11
A = 0x1E
S = 0x1F
D = 0x20
if (60 < prob) and (label == 'a'):
PressKey(A)
time.sleep(0.5)
ReleaseKey(A)
elif (60 < prob) and (label == 'w'):
PressKey(W)
time.sleep(0.5)
ReleaseKey(W)
elif (60 < prob) and (label == 'd'):
PressKey(D)
time.sleep(0.5)
ReleaseKey(D)
elif (60 < prob) and (label == 's'):
PressKey(S)
time.sleep(0.5)
ReleaseKey(S)
else:
time.sleep(0.5)
print(prob,label)
#return prob, label
if __name__ == '__main__':
predic_prob, predic_label = ingame_predic()
print(predic_prob, predic_label)
|
DH-an/Metaverse_Autonomous_Driving_AI_Project
|
Data_Collecting/ingame_testing.py
|
ingame_testing.py
|
py
| 3,032 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71601955069
|
import datetime
dogdict = {
"american staffordshire terrier": True,
"pitbull terrier": True,
"bullterrier": True,
"bullmastiff": True,
"staffordshire bullterrier": True,
"cane corso": True,
"dogo argentino": True,
"bordeaux dogge": True,
"fila brasileiro": True,
"mastin espanol": True,
"französische bulldogge": False,
"labrador": False,
"chihuahua": False,
"australian shepherd": False,
"rottweiler": False,
"border collie": False,
"golden retriever": False,
"rhodesian ridgeback": False,
"mops": False,
"berner sennenhund": False
}
citydict = {
75015: 108,
76359: 96,
69254: 78,
76275: 96,
76287: 90,
76337: 108,
76307: 66,
76327: 72,
75045: 108,
76356: 96,
76297: 84,
76344: 84,
76351: 72,
76707: 72,
76676: 60,
76689: 69,
76646: 96,
75053: 48,
75038: 90,
76703: 87,
76698: 61,
76707: 48,
68753: 96,
76661: 96,
76709: 72,
76669: 90,
76684: 75,
75059: 72
}
def serverHandler(clientString):
dogInformations = splitStringFromClient(clientString, True)
personalInformation = splitStringFromClient(clientString, False)
if(dogInformations[3].lower() == "nein"):
regionTax = getRegionTax(personalInformation)
dogRaceSeperation = getDogRaceSeperation(dogInformations)
dogTax = getDogTax(dogRaceSeperation, regionTax)
else:
dogTax = 0
return dogTax
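# Illustrative example of the expected clientString layout (an assumption inferred from
# the split logic below, not documented in the original):
#     "Max;Mustermann;...;...;...;...;76359,Rex;...;labrador;nein"
# i.e. personal data and dog data are separated by ',', the fields inside each part by ';',
# with the postcode at personal index 6, the breed at dog index 2 and the "Ermaessigung"
# flag ("ja"/"nein") at dog index 3.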
def splitStringFromClient(clientString, state):
try:
seperations = clientString.split(',')
if(state):
dogInformation = seperations[1].split(';')
return dogInformation
else:
personalInformation = seperations[0].split(';')
return personalInformation
except:
print("Error in SplitStringFormClient")
def getRegionTax(personalInformations):
regionTax = citydict[int(personalInformations[6])]
return regionTax
def getDogTax(dogRaceSeperation,regionTax):
dogTax = None
if(dogRaceSeperation == True):
dogTax = int(regionTax) * 5
else:
dogTax = int(regionTax)
if(dogTax != None):
return dogTax
else:
return "Error in DogTax"
def getDogRaceSeperation(dogInformations):
dogRaceSeperation = dogdict[dogInformations[2]]
return dogRaceSeperation
def checkDate(value):
try:
day, month, year = map(int, value.split('.'))
geburtstag_obj = datetime.date(year, month, day)
try:
date = value.strip()
datetime.datetime.strptime(date, "%d.%m.%Y")
except ValueError:
return "Ungueltiges Datumsformat. Bitte geben Sie den Geburtstag im Format TT.MM.JJJJ ein.", False
if geburtstag_obj >= datetime.date.today():
return "Ungueltiges Datumsformat. Der Geburtstag muss in der Vergangenheit liegen.", False
if month == 2 and day > 28:
leap_year = (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)
if (day == 29 and not leap_year) or (day > 29):
return "Ungueltiges Datumsformat. Bitte geben Sie ein gueltiges Geburtsdatum ein.", False
return "", True
except:
return "Bei der Datumsüberprüfung lief etwas schief. Bitte wenden Sie sich an einen Admin", False
def checkInput(value,nessesary, nameCheck):
if(nessesary):
if(value == None or value == ''):
return False
else:
if("Geburtstag" in nameCheck):
result = checkDate(value)
return result
elif(nameCheck == "Hunderasse"):
if(value in dogdict):
return True
else:
return False
elif(nameCheck == "Ermaessigung"):
if(value.lower() == "ja" or value.lower() == "nein"):
return True
else:
return False
elif(nameCheck == "PLZ"):
if(value.isnumeric and int(value.strip()) in citydict):
return True
else:
return False
elif(nameCheck == "Hausnummer"):
if(value.isnumeric()):
return True
else:
return False
else:
return True
else:
if(value != None):
return True
else:
return False
|
MHin504/OZG-Hundesteuer
|
Server.py
|
Server.py
|
py
| 4,498 |
python
|
de
|
code
| 0 |
github-code
|
6
|
1798043180
|
import time
import json
import board
import busio
import adafruit_ads1x15.ads1015 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
max_val = None
min_val = None
# Create the I2C bus
i2c = busio.I2C(board.SCL, board.SDA)
# Create the ADC object using the I2C bus
ads = ADS.ADS1015(i2c)
# Create single-ended input on channel 0
chan = AnalogIn(ads, ADS.P2)
baseline_check = input("Is Light Sensor Covered? (enter 'y' to proceed): ")
if baseline_check == 'y':
max_val = chan.value
print("------{:>5}\t{:>5}".format("raw", "v"))
for x in range(0, 10):
if chan.value > max_val:
max_val = chan.value
print("CHAN 2: "+"{:>5}\t{:>5.3f}".format(chan.value, chan.voltage))
time.sleep(0.5)
print('\n')
water_check = input("Does the Light Sensor receive the maximum light? (enter 'y' to proceed): ")
if water_check == 'y':
min_val = chan.value
print("------{:>5}\t{:>5}".format("raw", "v"))
for x in range(0, 10):
if chan.value < min_val:
min_val = chan.value
print("CHAN 2: "+"{:>5}\t{:>5.3f}".format(chan.value, chan.voltage))
time.sleep(0.5)
config_data = dict()
config_data["min"] = min_val
config_data["max"] = max_val
with open('light_config.json', 'w') as outfile:
json.dump(config_data, outfile)
print('\n')
print(config_data)
time.sleep(0.5)
|
pdany1116/is-iot-collector
|
helpers/light_intensity_moisture_calibration.py
|
light_intensity_moisture_calibration.py
|
py
| 1,305 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21987190006
|
import pytest
@pytest.fixture(name="fixer")
def fixer_fixture(two_to_three_test_case):
return two_to_three_test_case("methodattrs")
attrs = ["func", "self", "class"]
def test(fixer):
for attr in attrs:
b = "a.im_%s" % attr
if attr == "class":
a = "a.__self__.__class__"
else:
a = "a.__%s__" % attr
fixer.check(b, a)
b = "self.foo.im_%s.foo_bar" % attr
if attr == "class":
a = "self.foo.__self__.__class__.foo_bar"
else:
a = "self.foo.__%s__.foo_bar" % attr
fixer.check(b, a)
def test_unchanged(fixer):
for attr in attrs:
s = "foo(im_%s + 5)" % attr
fixer.unchanged(s)
s = "f(foo.__%s__)" % attr
fixer.unchanged(s)
s = "f(foo.__%s__.foo)" % attr
fixer.unchanged(s)
|
ryanwersal/crosswind
|
fixer_suites/two_to_three/tests/test_methodattrs.py
|
test_methodattrs.py
|
py
| 847 |
python
|
en
|
code
| 11 |
github-code
|
6
|
19581566047
|
from os.path import join as osjoin
paper_root_dir = 'paper'
stdj_dir = osjoin(paper_root_dir, 'stdj')
ctu_dir = osjoin(paper_root_dir, 'ctu')
stats_about_file_dir = osjoin('..', 'stats_about_files')
root_dir = osjoin('..', 'corpus')
# store all documents
all_doc_dir = osjoin(root_dir, 'all_doc')
# store source documents
src_dir = osjoin(root_dir, 'src')
# store raw suspicious documents
raw_susp_dir = osjoin(root_dir, 'raw_susp')
# store suspicious documents after making plagiarism docs
susp_dir = osjoin(root_dir, 'susp')
# store plagiarism paragraph location in suspicious docs
susp_stats_dir = osjoin(root_dir, 'susp_stats')
predict_susp_stats_dir = osjoin(root_dir, 'predicted_susp_stats')
# store embedding of documents, in pickle format
embeddings_dir = 'embeddings'
src_embeddings_dir = osjoin(embeddings_dir, 'src')
susp_embeddings_dir = osjoin(embeddings_dir, 'susp')
susp_embeddings_for_classification = osjoin(
embeddings_dir, 'susp_for_classification'
)
train_classifier_dir = 'train_classifier'
csv_dir = osjoin(train_classifier_dir, 'csv')
parquet_train_classifier_dir = osjoin(train_classifier_dir, 'parquet')
train_classifier_log_file = osjoin('log', 'train_classification_model.log')
find_plagiarism_paragraph_dir = 'find_plagiarism_paragarph'
plg_dataframe_dir = osjoin(find_plagiarism_paragraph_dir, 'plg_dataframe')
predited_stats_dir = osjoin(find_plagiarism_paragraph_dir, 'predicted_stats')
find_plg_log_file = osjoin('log', 'find_plagiarism_paragraph.log')
production_susp_dir = osjoin(root_dir, 'production_susp')
production_susp_stats_dir = osjoin(root_dir, 'production_susp_stats')
|
oldguard69/lvtn
|
server/core/directory.py
|
directory.py
|
py
| 1,627 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6673888762
|
import bs4
import requests
url = 'https://id.carousell.com/carousell_id'
contents = requests.get(url)
response = bs4.BeautifulSoup(contents.text, 'html.parser')
data = response.find('div', attrs={'class': 'D_apq D_eZ M_gF D_fb M_gH'})
# duplicate dict keys collapse to the last one, so pass the classes as a list (matches any of them)
datas = data.findAll('div', attrs={'class': ['D_jg', 'D_qq', 'D_qv']})
# print(datas)
for obj in datas:
judul = obj.find('p', attrs={'class': "D_bN M_aT D_aP M_aC D_bO M_aU D_bR M_aX D_bT M_aZ D_bW M_bc "
"D_bZ M_bg D_bK"}).text
image = obj.find('img', attrs={'class': 'D_bl', 'class': 'D_bi', 'class': 'D_tO'})['src']
with open('images/' + judul + '.jpg', 'wb') as f:
img = requests.get(image)
f.write(img.content)
|
AlfaRiza/ScrapingCarousell
|
getImg.py
|
getImg.py
|
py
| 748 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70514418749
|
import numpy as np
import json
import matplotlib.pyplot as plt
import os
# Joints such as ear, eyes are not necassary
wanted_joints = list(range(5,18)) + [19]
def normalize_halpe26(poses, img):
hip_idx = 13 #19 before removal of unneeded joints
for i, det in enumerate(poses):
nrows = det['box'][3]
ncols = det['box'][2]
# Define the centroid of pelvis (hip) as center of image
center_x = det['keypoints'][hip_idx, 0]
center_y = det['keypoints'][hip_idx, 1]
keypts = poses[i]['keypoints'].copy()[:,:-1]
keypts_norm = keypts.copy()
keypts_norm[:, 0] = (center_x - keypts[:, 0]) / ncols
keypts_norm[:, 1] = (center_y - keypts[:, 1]) / nrows
poses[i]['keypoints'][:, :-1] = keypts_norm
return poses
if __name__ == "__main__":
print(wanted_joints, len(wanted_joints))
|
DiscipleOfProgramming/hockey-pose-estimation
|
parse_halpe26.py
|
parse_halpe26.py
|
py
| 893 |
python
|
en
|
code
| 0 |
github-code
|
6
|