seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt
---|---|---|---|---|---|---|---|---|---|---|---|---
43536088674
|
import re
import math
import scipy.stats as stats
from statsmodels.stats.multitest import multipletests
import numpy as np
import pandas as pd
from tqdm import tqdm
# import functools
import pprint
from mutagene.dna import (
nucleotides, complementary_nucleotide,
bases_dict,
# comp_dict,
extended_nucleotides, complementary_extended_nucleotide)
from mutagene.io.motifs import get_known_motifs
import logging
logger = logging.getLogger(__name__)
def identify_motifs(samples_mutations, custom_motif=None, strand=None, threshold=None, dump_matches=None, stat_type=None):
"""
:param samples_mutations: list of mutations from input file
:param custom_motif: specified motif to search for
    :param strand: strand(s) to search on (T: transcribed, N: non-transcribed, A: any, or a combination thereof, e.g. 'TNA')
    :param threshold: q-value significance cutoff (defaults to 0.05)
:param dump_matches: pass through to process_mutations, stores all motif matches
:param stat_type: pass through to process_mutations, choose statistical test
:return: command-line output
"""
motif_matches = []
sig_motif_matches = []
pvals = []
if strand is None:
strand = 'A'
else:
strand = set(strand) # in case TNA codes repeat
if threshold is None:
threshold = 0.05
if custom_motif:
search_motifs = scanf_motif(custom_motif)
else:
motifs = get_known_motifs()
search_motifs = motifs.copy()
# search_motifs.extend(scanf_motif(custom_motif))
_strand_map = {
'T': 'transcribed',
'N': 'non-transcribed',
'A': 'any strand'
}
disable_progress_bar = logger.getEffectiveLevel() == logging.DEBUG
for sample, mutations in tqdm(samples_mutations.items(), leave=False, disable=disable_progress_bar):
if mutations is not None and len(mutations) > 0:
first_mut_seq_with_coords = mutations[0][-1]
window_size = (len(first_mut_seq_with_coords) - 1) // 2
for m in tqdm(search_motifs, leave=False, disable=disable_progress_bar):
for s in strand:
result, saved_data = process_mutations(
mutations,
m['motif'],
m['position'],
m['ref'],
m['alt'],
window_size,
s,
stat_type=stat_type)
if dump_matches:
for chrom, pos in saved_data['mutation_motif']:
dump_matches.write(
"chr{}\t{}\t{}\t{}\t{}\t{}\n".format(
chrom, pos, int(pos) + 1, sample, m['logo'], _strand_map[s]))
debug_data = {
'sample': sample,
'motif': m['logo'],
'strand': s}
debug_data.update(result)
debug_string = pprint.pformat(debug_data, indent=4)
logger.debug(debug_string)
motif_matches.append({
'sample': sample,
'mutagen': m['name'],
'motif': m['logo'],
'strand': _strand_map[s],
'enrichment': result['enrichment'],
'mut_min': result['mutation_load'],
'mut_max': result['bases_mutated_in_motif'],
'odds_ratio': result['odds_ratio'],
'pvalue': result['pvalue']
})
pvals.append(result['pvalue'])
qvalues = get_corrected_pvalues(pvals)
for i, motif_dict in enumerate(motif_matches):
motif_matches[i]['qvalue'] = qvalues[i]
if motif_dict['mut_min'] == 0:
continue
if motif_dict['qvalue'] >= threshold:
continue
sig_motif_matches.append(motif_dict)
return sig_motif_matches
def scanf_motif(custom_motif):
""" recognize motif syntax like A[C>T]G and create a motif entry """
m = re.search(
r'([' + extended_nucleotides + ']*)\\[([' + nucleotides + '])>([' + extended_nucleotides + '])\\]([' + extended_nucleotides + ']*)',
custom_motif.upper())
if m:
g = m.groups('')
# print("GROUPS", m.group(1), m.group(2), m.group(3), m.group(4))
entry = {}
entry['logo'] = m.group(0)
entry['motif'] = g[0] + g[1] + g[3]
entry['position'] = len(g[0])
entry['ref'] = g[1]
entry['alt'] = g[2]
if entry['ref'] == entry['alt']:
return []
entry['name'] = 'Custom motif'
entry['references'] = ''
return [entry, ]
return []
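# Illustrative example (hypothetical call; values follow the parsing logic above):
#   scanf_motif("A[C>T]G")
# returns a single entry:
#   [{'logo': 'A[C>T]G', 'motif': 'ACG', 'position': 1, 'ref': 'C', 'alt': 'T',
#     'name': 'Custom motif', 'references': ''}]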
def calculate_RR(ct):
"""
Mutation is treatment
No mutation is placebo
:param ct: mutually exclusive counts of mutated matching motifs, matching mutations, matching motifs, and matching bases
:return: enrichment or risk ratio
"""
try:
RR = ((ct.loc['mutation', 'motif'] / (ct.loc['mutation', 'motif'] + ct.loc['mutation', 'no motif'])) /
(ct.loc['no mutation', 'motif'] / (ct.loc['no mutation', 'motif'] + ct.loc['no mutation', 'no motif'])))
except ZeroDivisionError:
RR = 0.0
return RR
def calculate_RR_for_motif(ct):
"""
Motif is treatment
No motif is placebo
:param ct: mutually exclusive counts of mutated matching motifs, matching mutations, matching motifs, and matching bases
:return: enrichment or risk ratio
"""
try:
RR = ((ct.loc['mutation', 'motif'] / (ct.loc['mutation', 'motif'] + ct.loc['no mutation', 'motif'])) /
(ct.loc['mutation', 'no motif'] / (ct.loc['mutation', 'no motif'] + ct.loc['no mutation', 'no motif'])))
except ZeroDivisionError:
RR = 0.0
return RR
def calculate_OR(ct):
"""
:param ct: mutually exclusive counts of mutated matching motifs, matching mutations, matching motifs, and matching bases
:return: odds ratio
"""
try:
OR = (
(ct.loc['mutation', 'motif'] / ct.loc['mutation', 'no motif']) /
(ct.loc['no mutation', 'motif'] / ct.loc['no mutation', 'no motif']))
except ZeroDivisionError:
OR = 0.0
return OR
def Haldane_correction(ct, pseudocount=0.5):
"""
    :param ct: mutually exclusive counts of mutated matching motifs, matching mutations, matching motifs, and matching bases
    :param pseudocount: value added to every cell when the correction is applied
    :return: contingency table after the Haldane correction (+ 0.5 to every cell) is applied if any value in the table is zero
    """
return ct + pseudocount if np.any(np.isclose(ct.to_numpy(), 0.0)) else ct
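# Example: a table with a zero cell such as [[0, 90], [1000, 9000]] becomes
# [[0.5, 90.5], [1000.5, 9000.5]]; tables without zeros are returned unchanged.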
def calculate_mutation_load(N_mutations, enrichment):
"""
Mutation load (minimum estimate) calculation following Gordenin et al protocol
However, at this point motif matches are not filtered for p-value significance
That's done in the end after multiple testing correction
"""
mutation_load = 0.0
if enrichment > 1.0:
mutation_load = N_mutations * (enrichment - 1) / enrichment
# elif p_value < p_value_threshold: tests for enrichment depletion
return mutation_load
def get_stats(ct, stat_type='fisher'):
"""
Calculate Fisher and Chi2 test pvalues,
:param ct: counts of mutated matching motifs, matching mutations, matching motifs, and matching bases
:param stat_type: Type of pvalue (Fisher's ('fisher') or Chi-Square ('chi2'))
:return: pvalue of the corresponding statistical test
"""
p_val = 1.0
if stat_type is None:
stat_type = 'fisher'
stat_type = stat_type.lower()
acceptable_tests = ('fisher', 'chi2')
if stat_type not in acceptable_tests:
logger.warning('get_stats() can only calculate p-values for ' + str(acceptable_tests))
if stat_type == 'fisher':
try:
p_val = stats.fisher_exact(ct, alternative="greater")[1]
# if p_val > 0.05:
# p_val = stats.fisher_exact(ct, alternative="less")[1] #calculates if motif is underrepresented
except ValueError:
p_val = 1.0
elif stat_type == 'chi2':
try:
p_val = stats.chi2_contingency(ct)[1]
except ValueError:
p_val = 1.0
return p_val
def get_corrected_pvalues(p_values):
qvalues = []
if len(p_values):
qvalues = multipletests(pvals=p_values, method='fdr_bh')[1]
return qvalues
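# Worked example (hypothetical p-values): get_corrected_pvalues([0.01, 0.04, 0.03])
# returns Benjamini-Hochberg q-values [0.03, 0.04, 0.04]: each p-value is scaled
# by n/rank and the result is then made monotone in rank order.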
# @functools.lru_cache(maxsize=None)
def get_rev_comp_seq(sequence):
"""
:param sequence: forward DNA sequence
    :return: reverse complementary DNA sequence
"""
# rev_comp_seq = "".join([complementary_nucleotide[i] for i in reversed(sequence)])
cn = complementary_nucleotide
return [(i[0], i[1], cn[i[2]], '-') for i in reversed(sequence)]
def mutated_base(mutation, ref, alt):
"""
:param mutation: [(record.CHROM, record.POS, record.REF, record.ALT)]
    :param ref: the nucleotide base pre-mutation
    :param alt: the nucleotide base post-mutation
    :return: True if the mutation matches the specified ref and alt
    """
    # make sure it is a single base substitution
    _, _, mut_ref, mut_alt = mutation
    if mut_alt and mut_ref and len(mut_ref) == 1 and len(mut_alt) == 1 and mut_ref != mut_alt:
        # the mutation matches the substitution
        if mutation[2] in bases_dict[ref] and mutation[3] in bases_dict[alt]:
            return True
    return False
def find_matching_motifs(seq, motif, motif_position):
"""
:param seq: DNA sequence
:param motif: specified motif
:param motif_position: position of mutated base in motif, 0-base numbering
:return: generator of matching positions
TODO: SLOW algorithm O(n * m). Need to create a suffix tree with regexp
"""
# print("Looking for motif {} in {}, {}".format(motif, sequence, len(sequence) - len(motif)))
for i in range(len(seq) - len(motif) + 1):
# s = seq[i: i + len(motif)]
# print(s)
for j, c in enumerate(motif):
if seq[i + j][2] not in bases_dict[c]:
break
else:
yield seq[i + motif_position]
def find_matching_bases(seq, ref, motif, motif_position):
"""
:param seq:
:param ref:
:param motif:
:param motif_position:
:return: bases that match mutations
"""
for i in range(motif_position, len(seq) - (len(motif) - motif_position) + 1):
# range excludes border of sequence that may be motifs that don't fit window size
if seq[i][2] in bases_dict[ref]:
yield seq[i]
def make_contingency_table(
array=None,
motif_mutation=None,
no_motif_mutation=None,
motif_no_mutation=None,
no_motif_no_mutation=None):
""" Make a 2x2 contingency table out of a numpy array or four integers"""
if array is not None:
assert isinstance(array, np.ndarray)
assert array.shape == (2, 2)
else:
array = np.array([
[motif_mutation, no_motif_mutation],
[motif_no_mutation, no_motif_no_mutation]
])
contingency_table = pd.DataFrame(array)
contingency_table.columns = ["motif", "no motif"]
contingency_table.index = ["mutation", "no mutation"]
return contingency_table
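# Example layout (hypothetical counts):
#   make_contingency_table(motif_mutation=10, no_motif_mutation=90,
#                          motif_no_mutation=1000, no_motif_no_mutation=9000)
# yields the DataFrame:
#                motif  no motif
#   mutation        10        90
#   no mutation   1000      9000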
def process_mutations(mutations, motif, motif_position, ref, alt, range_size, strand, stat_type=None):
"""
:param mutations: mutations to be analyzed
:param motif: specified motif to search for
:param motif_position: location of mutation in motif, 0-base numbering from left of motif
:param ref: base pre-mutation
:param alt: base post-mutation
:param range_size: how far in the motif to search for
:param strand: strand motif should be searched on
:param stat_type: type of pvalue: Fisher's (default) or Chi-Square
    :return: (results summary dictionary, saved_matches with the positions of all motif matches)
"""
assert range_size >= 0
assert len(ref) == 1
assert len(alt) == 1
assert 0 <= motif_position < len(motif)
assert len(set(strand) - set("ATN")) == 0, "[process_mutations] only A, T, N allowed in strand parameter"
matching_bases = set()
matching_motifs = set()
matching_mutated_motifs = set()
matching_mutated_bases = set()
# extra loop for sample in sample list
for chrom, pos, transcript_strand, x, y, seq in mutations:
# extract the longest sequence we would ever need (motif + range_size); range size = # bases outside mutation
mutation = chrom, pos, x, y
rev_seq = get_rev_comp_seq(seq)
# assuming that all mutations are reported in '+' reference strand
if strand == 'A' or (strand == 'T' and transcript_strand == '+') or (strand == 'N' and transcript_strand == '-'):
# not mutated:
for ref_match in find_matching_bases(seq, ref, motif, motif_position):
matching_bases.add(ref_match[0:2])
for motif_match in find_matching_motifs(seq, motif, motif_position):
matching_motifs.add(motif_match[0:2])
# mutated:
if mutated_base(mutation, ref, alt):
# m = (mutation[0], mutation[1], mutation[2], "+")
matching_mutated_bases.add(mutation[0:2])
context_of_mutation = seq[range_size - motif_position: range_size - motif_position + len(motif)]
for motif_match in find_matching_motifs(context_of_mutation, motif, motif_position):
matching_mutated_motifs.add(motif_match[0:2])
if strand == 'A' or (strand == 'T' and transcript_strand == '-') or (strand == 'N' and transcript_strand == '+'):
# rev compl: not mutated:
for ref_match in find_matching_bases(rev_seq, ref, motif, motif_position):
matching_bases.add(ref_match[0:2])
for motif_match in find_matching_motifs(rev_seq, motif, motif_position):
matching_motifs.add(motif_match[0:2])
# rev compl: mutated:
if mutated_base(mutation, complementary_extended_nucleotide[ref], complementary_extended_nucleotide[alt]):
# m = (mutation[0], mutation[1], mutation[2], "-")
matching_mutated_bases.add(mutation[0:2])
# rev comp:
context_of_mutation = rev_seq[range_size - motif_position: range_size - motif_position + len(motif)]
for motif_match in find_matching_motifs(context_of_mutation, motif, motif_position):
matching_mutated_motifs.add(motif_match[0:2])
motif_mutation_count = len(matching_mutated_motifs) # bases mutated in motif
stat_mutation_count = len(matching_mutated_bases - matching_mutated_motifs) # bases mutated not in motif
stat_motif_count = len(matching_motifs - matching_mutated_motifs) # bases not mutated in motif
stat_ref_count = len(matching_bases - (matching_motifs | matching_mutated_bases)) # bases not mutated not in motif
# number of A[T>G]T occurrences motif_mutation_count
# / number of [T>G] occurrences stat_mutation_count + motif_mutation_count
# ----------
# number of ATT occurrences in DNA context stat_motif_count
# / number of T occurrences in DNA context stat_ref_count + stat_motif_count
contingency_table = make_contingency_table(
motif_mutation=motif_mutation_count,
no_motif_mutation=stat_mutation_count,
motif_no_mutation=stat_motif_count,
no_motif_no_mutation=stat_ref_count)
# data={
# "'{}>{}' mutation".format(ref, alt): [stat_mutation_count, motif_mutation_count],
# "no '{}>{}' mutation".format(ref, alt): [stat_ref_count, stat_motif_count]},
# index=("no '{}' motif".format(motif), "'{}' motif".format(motif)))
logger.debug("\n" + contingency_table.to_string() + "\n")
logger.debug("({} / ({} + {}) ) / ({} / ({} + {}))".format(
contingency_table.loc['mutation', 'motif'],
contingency_table.loc['mutation', 'motif'],
contingency_table.loc['mutation', 'no motif'],
contingency_table.loc['no mutation', 'motif'],
contingency_table.loc['no mutation', 'motif'],
contingency_table.loc['no mutation', 'no motif']))
contingency_table = Haldane_correction(contingency_table)
enrichment = risk_ratio = calculate_RR(contingency_table) # enrichment = risk ratio
odds_ratio = calculate_OR(contingency_table)
p_val = get_stats(contingency_table, stat_type)
mut_load = calculate_mutation_load(motif_mutation_count, enrichment)
result = {
'enrichment': enrichment, # AKA risk ratio
'odds_ratio': odds_ratio,
'mutation_load': math.ceil(mut_load),
'pvalue': p_val,
'bases_mutated_in_motif': motif_mutation_count,
'bases_mutated_not_in_motif': stat_mutation_count,
'bases_not_mutated_in_motif': stat_motif_count,
'bases_not_mutated_not_in_motif': stat_ref_count,
'total_mutations': len(mutations)
}
saved_matches = {
'mutation_motif': matching_mutated_motifs
}
return result, saved_matches
| neksa/mutagene | mutagene/motifs/__init__.py | __init__.py | py | 17,419 | python | en | code | 3 | github-code | 6 |

5221428710
|
def encode(s):
k = '0123456789abcdefghijklmnopqrstuv'
v = 0
vbits = 0
output = []
for c in s:
v |= ord(c) << vbits
vbits += 8
while vbits >= 5:
output.append(k[v & 31])
v >>= 5
vbits -= 5
if vbits:
output.append(k[v])
return ''.join(output)
def decode(s):
v = 0
vbits = 0
output = []
for c in s.lower():
if c >= '0' and c <= '9':
u = ord(c) - ord('0')
elif c >= 'a' and c <= 'v':
u = ord(c) - ord('a') + 10
else:
raise ValueError('Invalid base-32 input')
v |= u << vbits
vbits += 5
if vbits >= 8:
output.append(chr(v & 255))
v >>= 8
vbits -= 8
if vbits >= 5 or v:
raise ValueError('Invalid base-32 input')
return ''.join(output)
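# Round-trip sketch: bits are packed little-endian, 5 per output character, so
# encode('hi') == '8bq0' and decode('8bq0') == 'hi'.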
| agl/dnscurve | tools/base32.py | base32.py | py | 765 | python | en | code | 24 | github-code | 6 |

21836480019
|
import sys
sys.stdin = open('../input.txt', 'r')
def solve(d, next):
global real
if d == 7:
if sum(real) == 100:
print('\n'.join(map(str, sorted(real))))
sys.exit(0)
else:
for i in range(next, 9):
if sum(real) <= 100:
real.append(heights[i])
                solve(d+1, i+1)  # recurse from i+1 so each height is used at most once
real.pop()
heights = [int(input()) for _ in range(9)]
real = []
solve(0, 0)
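# The search picks 7 of the 9 heights with strictly increasing indices, prints
# them sorted as soon as their sum is exactly 100, and exits.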
| liza0525/algorithm-study | BOJ/boj_2309_seven_drwaf.py | boj_2309_seven_drwaf.py | py | 452 | python | en | code | 0 | github-code | 6 |

7713977328
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, render_template
import platform
import netifaces
myApp = Flask(__name__)
@myApp.route('/')
def home():
    # platform.linux_distribution() was removed in Python 3.8; guard the call so the page still renders
    dist = platform.linux_distribution() if hasattr(platform, 'linux_distribution') else ''
    data = {'user': 'ramy', 'machine': platform.node(), 'os': platform.system(), 'dist': dist, 'interfaces': netifaces.interfaces()}
return render_template('index.system.html', title='Home', data=data)
if __name__ == '__main__':
myApp.run(host='0.0.0.0', port=999)
| RMDHMN/pythonFlash_testing | system-template.py | system-template.py | py | 469 | python | en | code | 1 | github-code | 6 |

14765002641
|
from time import strftime
from configuration_validation import extract_data
from configuration_validation import configuration_validation_tool
import os
def main():
user_response = input('Do you want to search for all master.pmc files? [Y/N]\n')
master_file_list = list()
if user_response.lower() == 'y':
master_file_list = configuration_validation_tool.find_all_master()
elif user_response.lower() == 'n':
master_file_list.append(configuration_validation_tool.find_one_master())
else:
print('That is not a valid answer, please try again.')
main()
file_data, mcs = build_file(master_file_list)
export_label_data(mcs, file_data)
print('Cables labels produced.')
answer = input('Would you like to produce more cable labels? [Y/N]\n or type "Other" to select a different tool.\n')
configuration_validation_tool.retry(answer, '3')
def build_file(master_files):
file_data = list()
mcs = 'MCS Not found'
for file in master_files:
controller_frame = import_label_data(file)
cable_label_data = cable_label(controller_frame)
for i in cable_label_data:
axis_formatted_data = format_cable_label(i)
file_data.append(axis_formatted_data)
mcs = str(cable_label_data[0]['mcs'])
return file_data, mcs
def import_label_data(master_path):
# Imports the raw data required for the labels from a '.e.pmc' file.
data_fields = ['e_BrickN_P',
'e_DevName_P',
'e_AxisID_P',
'b_AxisN_P',
'a_Commutation_D',
'a_IsBrakeUsed_X',
'e_StageEncPortN_X',
'b_StageEncPortN_P',
'e_MotEncPortN_X',
'b_MotEncPortN_P',
'a_IsEncoderUsed_X',
'e_BrickIP_P',
]
controller_frame = extract_data.main(master_path, data_fields)
return controller_frame
def cable_label(controller_data):
# Applies consistent wording for variables, and imports encoder details if required.
data = list()
aux = dict()
for i, v in controller_data.iterrows():
label = dict()
i = int(i)
if controller_data.iloc[i]['a_Commutation_D'].lower() == "'directmicrostepping'":
label['motor_type'] = 'Stepper'
elif controller_data.iloc[i]['a_Commutation_D'].lower() == "'notdirectmicrostepping'":
label['motor_type'] = 'Stepper'
elif controller_data.iloc[i]['a_Commutation_D'].lower() == "'brushlessdc'":
label['motor_type'] = 'BrushlessDC'
elif controller_data.iloc[i]['a_Commutation_D'].lower() == "'dcbrush'":
label['motor_type'] = 'DCBrushed'
else:
print(controller_data.iloc[i]['a_Commutation_D'])
label['motor_type'] = 'Error'
if controller_data.iloc[i]['a_IsEncoderUsed_X'] == '1':
label['enc_type'] = 'ENC'
label['stage_enc_axis'] = controller_data.iloc[i]['b_StageEncPortN_P']
label['motor_enc_axis'] = controller_data.iloc[i]['b_MotEncPortN_P']
else:
label['enc_type'] = False
label['stage_enc_axis'] = 'none'
label['motor_enc_axis'] = 'none'
if controller_data.iloc[i]['a_IsBrakeUsed_X'] == '1':
aux['maux'] = 'aux'
aux['mcs'] = strip_grab(controller_data, 'e_BrickN_P', i)
label['mcs'] = strip_grab(controller_data, 'e_BrickN_P', i)
label['axis'] = strip_grab(controller_data, 'b_AxisN_P', i)
label['axisname'] = strip_grab(controller_data, 'e_AxisID_P', i)
label['devname'] = strip_grab(controller_data, 'e_DevName_P', i)
data.append(label.copy())
data.append(aux.copy())
return data
def format_cable_label(label_data):
# Formats the data for use with BMP-71 labeler.
formatted_data = list()
valid_mot_axis = ['Stepper', 'BrushlessDC', 'DCBrushed', ]
valid_enc_axis = ['1', '2', '3', '4', '5', '6', '7', '8', ]
if 'maux' in label_data and label_data['maux'] == 'aux':
formatted_data.append('MCS{0},MAUX,\n'.format(label_data['mcs']))
return formatted_data
if 'motor_type' in label_data and label_data['motor_type'] in valid_mot_axis:
formatted_data.append('MCS{0},{1},#{2} - {3},:{4},\n'.format(label_data['mcs'],
label_data['motor_type'],
label_data['axis'],
label_data['devname'],
label_data['axisname'], ))
elif 'motor_type' in label_data and label_data['motor_type'] not in valid_mot_axis:
formatted_data.append('Motor Type not found!,ERROR,ERROR,ERROR,\n')
if 'enc_type' in label_data and label_data['enc_type'] is not False and label_data['stage_enc_axis'] in \
valid_enc_axis:
formatted_data.append('MCS{0},{1},#{2} - {3},:{4},\n'.format(label_data['mcs'],
label_data['enc_type'],
label_data['stage_enc_axis'],
label_data['devname'],
label_data['axisname']))
if 'enc_type' in label_data and label_data['enc_type'] is not False and label_data['motor_enc_axis'] in \
valid_enc_axis:
formatted_data.append('MCS{0},{1},#{2} - {3},:{4},\n'.format(label_data['mcs'],
label_data['enc_type'],
label_data['motor_enc_axis'],
label_data['devname'],
label_data['axisname']))
return formatted_data
def export_label_data(mcs, data):
    # Writes the data to a csv file.
mcs = str(mcs)
output_path = os.getcwd().rsplit('\\', 1)[0]
filename = output_path + '\\output\\Labels_MCS{0}_{1}'.format(mcs, strftime("%Y%m%d%H%M%S"))
    with open(filename + '.csv', 'w') as f:
        for label in data:
            for line in label:
                f.write(line)
    # no explicit close needed: the 'with' block closes the file
def strip_grab(dataframe, var, i):
# Strips values of unnecessary quotation marks.
try:
value = dataframe.iloc[i][var]
except KeyError:
value = ' No valid data.'
    if isinstance(value, str):
        value = value.strip("'")
    return value
| naderafsh/configuration_validation | configuration_validation/cable_labels.py | cable_labels.py | py | 6,895 | python | en | code | 0 | github-code | 6 |

2642837677
|
def majorityElement(nums):
    # Moore's Voting Algorithm
n = len(nums)
count = 0
element = None
for i in range(n):
if count == 0:
count = 1
element = nums[i]
elif element == nums[i]:
count += 1
else:
count -= 1
# Checking if the stored element is the majority element
count2 = 0
for i in range(n):
if nums[i] == element:
count2 += 1
if count2 > (n / 2):
return element
return -1
nums = [2, 2, 1, 1, 1, 2, 2]
print(majorityElement(nums))
| ArunRawat404/DSA | Array/Medium/3. Majority Element.py | 3. Majority Element.py | py | 586 | python | en | code | 0 | github-code | 6 |

8167072903
|
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import regularizers
import numpy as np
import pandas as pd
import math
import sys
import os
import keras
from keras.models import load_model
from keras.layers import Dropout , Flatten
from keras.layers import BatchNormalization
from keras.preprocessing.text import text_to_word_sequence
from keras.preprocessing.text import one_hot
import string
from keras.layers import MaxPooling1D
from keras.layers import ConvLSTM2D
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import LSTM,GRU,TimeDistributed
from keras.layers import Dense
from keras.layers.embeddings import Embedding
from gensim.models.word2vec import Word2Vec
def normal_string(string):
if not string:
return ""
if len(string) <= 2:
return string
if len(string) > 2 and string[0] == string[1] and string[1] == string[2]:
return normal_string(string[1:])
return string[0] + normal_string(string[1:])
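# e.g. normal_string("coooool") -> "cool": runs of three or more identical
# characters are collapsed down to two.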
def remove_space(text):
index_list = [i for i, letter in enumerate(text) if letter == '\'']
remove_list = []
for i in range(0,len(index_list)):
if index_list[i]-1 >= 0 and text[index_list[i]-1] == ' ':
remove_list.append(index_list[i]-1)
if index_list[i]+1 < len(text) and text[index_list[i]+1] == ' ':
remove_list.append(index_list[i]+1)
#remove_list.append(index_list[i])
text = "".join([char for idx, char in enumerate(text) if idx not in remove_list])
return text
mode = sys.argv[3]
test_data_filename = sys.argv[1]
t_lines = [line.rstrip('\n') for line in open(test_data_filename,'r' , errors='replace' , encoding='utf-8')]
t_lines = t_lines[1:]
for i in range(0,len(t_lines)):
num = len(str(i))
t_lines[i] = t_lines[i][num+1:]
w2v_t_lines = []
for i in range(0, len(t_lines)):
t_lines[i] = remove_space(t_lines[i])
tk = text_to_word_sequence(t_lines[i], filters='', lower=True, split=' ')
tmp_line = []
tmp = ""
for j in range(0,len(tk)):
tk[j] = tk[j].encode("ascii", errors="ignore").decode()
tk[j] = normal_string(tk[j])
tmp_line.append(tk[j])
tmp = tmp + tk[j] + " "
t_lines[i] = tmp
w2v_t_lines.append(tmp_line)
model = Word2Vec.load("gensim_w2v_0.82693_0602_model")
word_vectors = model.wv
vocab = []
for k, v in word_vectors.vocab.items():
vocab.append( (k,v.index) )
vocab = sorted(vocab , key=lambda x:x[1])
word_index_dict = {}
for i in range(0,len(vocab)):
word = vocab[i][0]
word_index_dict[word] = i+1
word_index_dict["unknown_word"] = len(vocab)+1
test_ind = []
for i in range(len(w2v_t_lines)):
tmp = []
for w in w2v_t_lines[i]:
if w not in word_index_dict:
tmp.append(word_index_dict["unknown_word"])
else:
tmp.append(word_index_dict[w])
test_ind.append(tmp)
rnn_model = load_model("0602_gensim_0.82693.h5")
test = sequence.pad_sequences(test_ind, maxlen=33)
p = rnn_model.predict(test)
ans_filename = sys.argv[2]
ans_file = open(ans_filename , 'w')
ans_file.write("id,label\n")
for i in range(0,len(p)):
ans_file.write(str(i))
ans_file.write(',')
if p[i][0] >= 0.5:
ans_file.write('1')
else:
ans_file.write('0')
    ans_file.write('\n')
ans_file.close()
| muachilin/Machine-Learning | hw5/hw5_test.py | hw5_test.py | py | 3,172 | python | en | code | 0 | github-code | 6 |

5200586519
|
"""
This module contains the main transmittance/reflectance calculation
bits.
Users can run the calculations through `model.Model()` and avoid
accessing `core` directly.
"""
import numpy as np
def rt_amp(index, delta, theta, pol):
"""Calculate the reflected and transmitted amplitudes through the
system.
Parameters
----------
index : numpy array
An array of refractive indices, ordered from source layer to
terminator layer.
delta : numpy array
An array of wavenumber offsets.
theta : numpy array
An array of angles in radians.
pol : string
The polarization of the source wave: 's' or 'p',
or 'u'.
Returns
-------
r, t : tuple
A tuple where 'r' is the reflected amplitude, and 't' is the
transmitted amplitude.
"""
t_amp, r_amp = make_rt_amp_matrix(index, theta, pol)
m_mat = make_m_matrix(index, t_amp, r_amp, delta)
m_prime = make_2x2(1., 0., 0., 1., dtype=complex)
for i in range(1, len(index)-1):
m_prime = np.dot(m_prime, m_mat[i])
C_m = make_2x2(1., r_amp[0, 1], r_amp[0, 1], 1., dtype=complex)
m_prime = np.dot(C_m / t_amp[0, 1], m_prime)
trans_amp = 1 / m_prime[0, 0]
ref_amp = m_prime[1, 0] / m_prime[0, 0]
return ref_amp, trans_amp
def make_rt_amp_matrix(index, theta, pol):
"""Construct reflection and transmission amplitude matrices.
Parameters
----------
index : numpy array
An array of refractive indices, ordered from source layer to
terminator layer.
theta : numpy array
An array of angles in radians.
pol : string
The polarization of the source wave: 's' or 'p'.
Returns
-------
t_mat, r_mat : tuple
The t- and r-amplitude matrices.
"""
t_mat = np.zeros((len(index), len(index)), dtype=complex)
r_mat = np.zeros((len(index), len(index)), dtype=complex)
for i in range(len(index) - 1):
t_mat[i, i+1] = t_interface(index[i], index[i+1], theta[i], theta[i+1], pol)
r_mat[i, i+1] = r_interface(index[i], index[i+1], theta[i], theta[i+1], pol)
return t_mat, r_mat
def make_m_matrix(index, t_matrix, r_matrix, delta):
"""Construct the characteristic matrix of the model.
Parameters
----------
index : numpy array
An array of refractive indices, ordered from source layer to
terminator layer.
t_matrix : numpy array
The t-amplitude matrix
r_matrix : numpy array
The r-amplitude matrix
delta : numpy array
An array of wavenumber offsets.
Returns
-------
m_mat : numpy array
The characteristic matrix of the model
"""
m_mat = np.zeros((len(index), 2, 2), dtype=complex)
for i in range(1, len(index)-1):
C_m = make_2x2(np.exp(-1j * delta[i]), 0., 0., np.exp(1j * delta[i]),
dtype=complex)
r_m = make_2x2(1., r_matrix[i, i+1], r_matrix[i, i+1], 1., dtype=complex)
m_mat[i] = (1 / t_matrix[i, i+1]) * np.dot(C_m, r_m)
return m_mat
def r_power(r_amp):
"""Return the fraction of reflected power.
Parameters
----------
r_amp : float
The net reflection amplitude after calculating the transfer
matrix.
Returns
-------
R : numpy array
The model reflectance
"""
return np.abs(r_amp)**2
def t_power(t_amp, index_i, index_f, theta_i, theta_f):
"""Return the fraction of transmitted power.
Parameters
----------
t_amp : float
The net transmission amplitude after calculating the transfer
matrix.
index_i : float
The index of refraction of the source material.
index_f : float
The index of refraction of the terminating material.
theta_i : float
The angle of incidence (radians) at the initial interface.
theta_f : float
The angle of incidence (radians) at the final interface.
Returns
-------
T : numpy array
The model transmittance
"""
return np.abs(t_amp**2) * \
( (index_f * np.cos(theta_f)) / (index_i * np.cos(theta_i) ) )
def r_interface(index1, index2, theta1, theta2, pol):
"""Calculate the reflected amplitude at an interface.
Parameters
----------
index1 : float
The index of refraction of the first material.
index2 : float
The index of refraction of the second material.
theta1 : float
The angle of incidence at interface 1, in radians
theta2 : float
The angle of incidence at interface 2, in radians
pol : string
The polarization of the source wave (either 's' or 'p').
Returns
-------
reflected amplitude : float
The amplitude of the reflected field at the interface
"""
if pol == 's':
numerator = (index1 * np.cos(theta1) - index2 * np.cos(theta2))
denominator = (index1 * np.cos(theta1) + index2 * np.cos(theta2))
elif pol == 'p':
numerator = (index2 * np.cos(theta1) - index1 * np.cos(theta2))
denominator = (index1 * np.cos(theta2) + index2 * np.cos(theta1))
else:
raise ValueError("Polarization must be 's' or 'p'")
return numerator / denominator
def t_interface(index1, index2, theta1, theta2, pol):
"""Calculate the transmission amplitude at an interface.
Parameters
----------
index1 : float
The index of refraction of the first material.
index2 : float
The index of refraction of the second material.
theta1 : float
The angle of incidence at interface 1, in radians
theta2 : float
The angle of incidence at interface 2, in radians
pol : string
The polarization of the source wave (either 's' or 'p').
Returns
-------
transmitted_amplitude : float
The amplitude of the transmitted field at the interface
"""
if pol == 's':
numerator = 2 * index1 * np.cos(theta1)
denominator = (index1 * np.cos(theta1) + index2 * np.cos(theta2))
elif pol == 'p':
numerator = 2 * index1 * np.cos(theta1)
denominator = (index1 * np.cos(theta2) + index2 * np.cos(theta1))
else:
raise ValueError("Polarization must be 's' or 'p'")
return numerator / denominator
def wavenumber(freq, index, tand):
"""Calculate the wavenumber in a material.
Parameters
----------
freq : float
The frequency at which to calculate the wavevector, k
tand : numpy array
An array of loss tangents, ordered from source to terminating
index : numpy array
An array of refractive indices, ordered from source to
terminating layer
Returns
-------
k : array
The complex wavenumber, k
"""
k = 2 * np.pi * (freq / 3e8) * index * np.sqrt(1 + 1j * tand)
return k
def alpha2imagn(freq, a, b, n):
"""Convert Halpern's 'a' and 'b' from an absorption coefficient
    of the form `a*freq**b` to a (frequency-dependent) imaginary refractive index.
Parameters
----------
freq : numpy array or float
The frequency (Hz) (or frequencies) at which to calculate the loss
tangent.
a : float
Halpern's 'a' coefficient
b : float
Halpern's 'b' coefficient
n : float
The real part of the material's refractive index
Returns
-------
imagn : numpy array or float
The imaginary component of the refractive index
"""
nu = freq / 30e9
# First we need the frequency-dependent absorption coefficient,
# alpha, which we get from the Halpern fit. From that we will
# calculate k(appa), the extinction coefficient, for each
# frequency of interest
alpha = 2 * a * nu**b
# This is the absorption-extinction coefficient relation as ~written
# in Born & Wolf Principles of Optics 1st Ed., 1959, Ch. 13.1,
# Pg. 614, Eq. 21
# The factor of 3e10 (c in units of cms^-1) ensures that our k is
# unitless, as it ought to be.
imagn = (100 * 3e8 * alpha) / (4 * np.pi * n * freq)
return imagn
def alpha2tand(freq, a, b, n):
"""Convert Halpern's 'a' and 'b' from an absorption coefficient
of the form `a*freq**b` to a (frequency-dependent) loss tangent.
Parameters
----------
freq : numpy array or float
The frequency (Hz) (or frequencies) at which to calculate the loss
tangent.
a : float
Halpern's 'a' coefficient
b : float
Halpern's 'b' coefficient
n : float
The real part of the material's refractive index
Returns
-------
tand : numpy array
The loss tangent of the material at the given frequency and
Halpern coefficients.
"""
imagn = alpha2imagn(freq, a, b, n)
# The complex index of refraction of a material is related to the
# complex (relative) permittivity by the relation:
# e_r = e' + i*e'' = n^2 = (n + i*k)^2 = n^2 - k^2 + i*2nk
# By equating the real and imaginary parts we are left with:
# e' = (n^2 - k^2); e'' = 2nk
# With this information we can find the loss tangent, which is simply
# the ratio of the real and imaginary parts of the relative
# permittivity:
# tand = (e''/e')
ep = n**2 - imagn**2
epp = 2 * n * imagn
tand = epp / ep
return tand
def make_2x2(a11, a12, a21, a22, dtype=float):
"""Return a 2x2 array quickly.
Parameters
----------
a11 : float
Array element [0, 0].
a12 : float
Array element [0, 1].
a21 : float
Array element [1, 0].
a22 : float
Array element [1, 1].
dtype : dtype, optional
The datatype of the array. Defaults to float.
Returns
-------
array : numpy array
A 2x2 array [[a11, a12], [a21, a22]]
"""
array = np.empty((2, 2), dtype=dtype)
array[0, 0] = a11
array[0, 1] = a12
array[1, 0] = a21
array[1, 1] = a22
return array
def prop_wavenumber(k, d, theta):
"""Propagate the wave through a material and calculate its offset,
delta.
Parameters
----------
k : array
The wavenumber
d : array
An array of distances (thicknesses), ordered from source to
terminating layer
theta : float
The angle the wave passes through the medium
Returns
-------
delta : array
The phase difference
"""
    # Turn off 'invalid multiplication' warnings; they come only from the 'inf' boundaries
    # (numpy's seterr is used directly; the old scipy alias was removed from SciPy)
    olderr = np.seterr(invalid='ignore')
    delta = k * d * np.cos(theta)
    # Now restore the previous error settings
    np.seterr(**olderr)
return delta
def refract(n, theta0):
"""Calculate the angle by which an incident ray is refracted
Parameters
----------
n : numpy array
An array of refractive indices, ordered from source layer to
terminator layer.
theta0 : float
The initial angle of incidence (radians)
Returns
-------
thetas : numpy array
The Snell angles at each interface
"""
# Make a nice pairwise generator so we can avoid playing games with
# index counting
thetas = [theta0]
ngen = zip(n, n[1:])
for i, rind in enumerate(ngen):
theta = np.arcsin(np.real_if_close( rind[0] * np.sin(thetas[i]) / rind[1] ))
thetas.append(theta)
return np.asarray(thetas)
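# Example (assumed indices): refract(np.array([1.0, 1.5, 1.0]), 0.3) chains
# Snell's law, n_i*sin(theta_i) = n_{i+1}*sin(theta_{i+1}), across each interface
# and returns one propagation angle per layer, starting with theta0 itself.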
def replace_tand(freq, tand_array, halpern_dict):
"""Calculate a frequency-dependent loss tangent from a material's
Halpern coefficiencts if they exist.
Parameters
----------
freq : float
The frequency at which to calculate the loss tangent
tand_array : numpy array
The loss tangents of the materials, ordered from Source to
Terminator
halpern_dict : dict
A dictionary keyed by layer index, containing Halpern coefficients
Returns
-------
tand_array : numpy array
The loss tangents of the materials, ordered from Source to
Terminator. Where possible, the Halpern coefficients have been
applied to make the terms frequency-dependent.
"""
for k, v in halpern_dict.items():
tand_array[k] = alpha2tand(freq, v['a'], v['b'], v['n'])
return tand_array
def main(params):
"""Run a transmittance/reflectance calculation for the given parameters.
This function is the primary entry-point to the calculation, and should
not be called directly. Instead, call `Model.run()`.
If you must call `core.main()` directly, only do so after first calling
`Model.set_up()`.
Parameters
----------
params : dict
The dictionary contructed by `Model.set_up`. See that function
documentation for details.
Returns
-------
result : dict
A dictionary with three keys:
* `frequency`: the frequency (in Hz) at which T and R were calculated
* `transmittance`: the output transmittance (T) of the model
* `reflectance`: the output reflectance (R) of the model
"""
rind = params['rind']
thick = params['thick']
tand = params['tand']
pol = params['pol']
theta0 = params['theta0']
theta = refract(rind, theta0)
freq = params['freq']
halps = params['halpern_layers']
# Create containers for the reflection/transmission values we calculate
# at each frequency
ts = []
rs = []
for f in freq:
if len(halps.keys()) > 0:
tand = replace_tand(f, tand, halps)
ks = wavenumber(f, rind, tand)
delta = prop_wavenumber(ks, thick, theta)
r_amp, t_amp = rt_amp(rind, delta, theta, pol)
t_pow = t_power(t_amp, rind[0], rind[-1], theta[0], theta[-1])
r_pow = r_power(r_amp)
ts.append(t_pow)
rs.append(r_pow)
ts = np.asarray(ts)
rs = np.asarray(rs)
results = {'frequency':freq, 'transmittance':ts, 'reflectance':rs}
return results
| anadolski/armmwave | armmwave/core.py | core.py | py | 13,897 | python | en | code | 1 | github-code | 6 |

8762469387
|
from llm_rs import SessionConfig, GenerationConfig, Gpt2
class Chainer:
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super(Chainer, cls).__new__(cls)
cls._instance.init_chainer()
return cls._instance
def init_chainer(self):
self.stop_words = ['<EOL>', '<eol>', '<Eol>', 'pertanyaan :', 'Human', 'human', 'Pertanyaan', '\n']
self.previous_qa = []
session_config = SessionConfig(
threads=4,
context_length=1300,
prefer_mmap=False
)
self.generation_config = GenerationConfig(
top_p=0.44,
top_k=1,
temperature=0.22,
max_new_tokens=120,
repetition_penalty=1.13,
stop_words=self.stop_words
)
self.model = Gpt2("2midguifSfFt5SbHJsxP.bin", session_config=session_config)
def chain(self, user_input):
if self.previous_qa:
previous_question, previous_answer = self.previous_qa[-1]
else:
previous_question, previous_answer = "", ""
template = f"saya bisa menjawab pertanyaan dengan masalah kesehatan.\nHai! Saya adalah chatbot yang akan menjawab pertanyaan seputar kesehatan. Saya adalah chatbot, bukan manusia.\nanda dapat menanyakan saya pertanyaan seputar kesehatan melalui kolom teks dibawah.\n\nPertanyaan saya:\n{previous_question}\n\nJawaban anda:\n{previous_answer}\n\nPertanyaan saya: {user_input}.\nJawaban anda :"
result = self.model.generate(template, generation_config=self.generation_config)
response = result.text.strip()
self.previous_qa.append((user_input, response))
if len(self.previous_qa) > 1:
self.previous_qa.pop(0)
return response
generator = Chainer()
def generate(text_input):
result = generator.chain(text_input)
return result
| andri-jpg/termux-fa | lib.py | lib.py | py | 1,917 | python | en | code | 2 | github-code | 6 |

45386585406
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from theory.conf import settings
from theory.utils.safestring import markSafe
from theory.utils import six
def format(number, decimalSep, decimalPos=None, grouping=0, thousandSep='',
forceGrouping=False):
"""
Gets a number (as a number or string), and returns it as a string,
using formats defined as arguments:
* decimalSep: Decimal separator symbol (for example ".")
* decimalPos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator
* thousandSep: Thousand separator symbol (for example ",")
"""
useGrouping = settings.USE_L10N and settings.USE_THOUSAND_SEPARATOR
useGrouping = useGrouping or forceGrouping
useGrouping = useGrouping and grouping > 0
# Make the common case fast
if isinstance(number, int) and not useGrouping and not decimalPos:
return markSafe(six.textType(number))
# sign
sign = ''
strNumber = six.textType(number)
if strNumber[0] == '-':
sign = '-'
strNumber = strNumber[1:]
# decimal part
if '.' in strNumber:
intPart, decPart = strNumber.split('.')
if decimalPos is not None:
decPart = decPart[:decimalPos]
else:
intPart, decPart = strNumber, ''
if decimalPos is not None:
decPart = decPart + ('0' * (decimalPos - len(decPart)))
if decPart:
decPart = decimalSep + decPart
# grouping
if useGrouping:
intPartGd = ''
for cnt, digit in enumerate(intPart[::-1]):
if cnt and not cnt % grouping:
intPartGd += thousandSep
intPartGd += digit
intPart = intPartGd[::-1]
return sign + intPart + decPart
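# Example (hypothetical call with grouping forced on):
#   format(1234567.891, '.', decimalPos=2, grouping=3, thousandSep=',',
#          forceGrouping=True) -> '1,234,567.89'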
| grapemix/theory | theory/utils/numberformat.py | numberformat.py | py | 1,639 | python | en | code | 1 | github-code | 6 |

4991495509
|
#!/usr/bin/python3
# enable debugging
import cgi, cgitb
import json
cgitb.enable()
class Expense:
def __init__(self, exp_name,exp_date,exp_amount,exp_type):
self.name = exp_name
self.date = exp_date
self.amount = exp_amount
self.type = exp_type
form = cgi.FieldStorage()
exp_name = form.getvalue('exp_name')
exp_date = form.getvalue('exp_date')
exp_amount = form.getvalue('exp_amount')
exp_type = form.getvalue('exp_type')
expense = Expense(exp_name,exp_date,exp_amount,exp_type)
jsonString = json.dumps(expense.__dict__)
# NOTE: appending produces a stream of concatenated JSON objects in data.json,
# not a single valid JSON document
jsonFile = open("/var/www/html/data.json", "a+")
jsonFile.write(jsonString)
jsonFile.close()
print('Content-Type: text/html')
print('')
print('successful')
print('<br>')
print(jsonString)
| eliz-liu/money_site_html | form.py | form.py | py | 795 | python | en | code | 0 | github-code | 6 |

31975617255
|
from django import template
from ..models import Page
register = template.Library()
@register.simple_tag
def main_menu():
"Query top-level pages"
return Page.objects.with_tree_fields().filter(
parent=None, is_active=True)
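# Template usage sketch (tag library name and Page fields are assumed):
#   {% load menus %}
#   {% main_menu as pages %}
#   {% for page in pages %}<a href="...">{{ page.title }}</a>{% endfor %}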
| dnknth/feincms-demo | pages/templatetags/menus.py | menus.py | py | 240 | python | en | code | 1 | github-code | 6 |

18801853357
|
import pytest
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel
from utils.util_log import test_log as log
# customer rg
rg_name_0 = "RG_0"
rg_name_1 = "RG_1"
# coll name
coll_name_1 = "ResourceGroup_111"
coll_name_2 = "ResourceGroup_222"
# resource group info of 4 qns
resource_group_info = [
{"name": rg_name_0, "available_node": 1, "capacity": 1, "loaded_replica": {coll_name_1: 1}},
{"name": rg_name_1, "available_node": 1, "capacity": 1, "loaded_replica": {coll_name_1: 1}},
{"name": ct.default_resource_group_name, "available_node": 2,
"capacity": ct.default_resource_group_capacity, "loaded_replica": {coll_name_2: 2}}
]
class TestChaosRG(TestcaseBase):
""" Test case of end to end"""
def teardown_method(self, method):
log.info(("*" * 35) + " teardown " + ("*" * 35))
log.info("[teardown_method] Start teardown test case %s..." %
method.__name__)
log.info("skip drop collection")
@pytest.mark.tags(CaseLabel.L3)
def test_milvus_resource_group(self):
nb = 10000
# collection rg map
collection_rg_map = {
coll_name_1: {"resource_groups": [rg_name_0, rg_name_1], "replica_number": 2},
coll_name_2: {"resource_groups": [ct.default_resource_group_name], "replica_number": 2}
}
self._connect()
# create RG_0, RG_1, transfer 1 node to RG_0, 1 node to RG_1
for rg_info in resource_group_info:
rg_name = rg_info["name"]
if rg_name != ct.default_resource_group_name:
_, create_rg_res = self.utility_wrap.create_resource_group(rg_name)
assert create_rg_res
log.info(f"[ResourceGroup] Create rg {rg_name} done")
self.utility_wrap.transfer_node(source=ct.default_resource_group_name, target=rg_name,
num_node=rg_info["available_node"])
log.info(
f'[ResourceGroup] Transfer {rg_info["available_node"]} nodes from {ct.default_resource_group_name} to {rg_name} done')
# verify RGs
resource_groups, _ = self.utility_wrap.list_resource_groups()
assert len(resource_groups) == len(resource_group_info)
assert all([rg_info["name"] in resource_groups for rg_info in resource_group_info])
for rg_info in resource_group_info:
rg_info = {"name": rg_info["name"],
"capacity": rg_info["capacity"],
"num_available_node": rg_info["available_node"],
"num_loaded_replica": {},
"num_outgoing_node": {},
"num_incoming_node": {}
}
desc_rg_info, _ = self.utility_wrap.describe_resource_group(name=rg_info["name"],
check_task=ct.CheckTasks.check_rg_property,
check_items=rg_info)
log.info(f'[ResourceGroup] Rg of {rg_info["name"]} info is: {desc_rg_info}')
# prepare collection C1, C2
# create
data = cf.gen_default_dataframe_data(nb=nb)
index_params = {"index_type": "HNSW", "metric_type": "L2", "params": {"M": 48, "efConstruction": 500}}
for coll_name in coll_name_1, coll_name_2:
# create
collection_w = self.init_collection_wrap(name=coll_name, active_trace=True)
log.info(f"create collection {collection_w.name} done")
entities = collection_w.num_entities
# insert
_, res = collection_w.insert(data)
assert res
log.info(f"insert {nb} entities done")
# flush
_, check_result = collection_w.flush(timeout=180)
assert check_result
assert collection_w.num_entities == nb + entities
entities = collection_w.num_entities
log.info(f"flush done with entities: {entities}")
# index
index, _ = collection_w.create_index(field_name=ct.default_float_vec_field_name,
index_params=index_params,
index_name=cf.gen_unique_str())
index, _ = collection_w.create_index(field_name=ct.default_string_field_name,
index_params={},
index_name=cf.gen_unique_str())
index_infos = [index.to_dict() for index in collection_w.indexes]
log.info(f"index info: {index_infos}")
# load coll_rg_a, 2 replicas -> RG_0, RG_1
# load coll_rg_b, 2 replicas -> default_RG
collection_w.load(replica_number=collection_rg_map[coll_name]["replica_number"],
_resource_groups=collection_rg_map[coll_name]["resource_groups"])
# show query segment info
segment_info, _ = self.utility_wrap.get_query_segment_info(collection_w.name)
log.info(f"{collection_w.name} segment info: {segment_info}")
# show replicas info
replicas, _ = collection_w.get_replicas()
log.info(f"{collection_w.name} replica info: {replicas}")
# search
search_vectors = cf.gen_vectors(ct.default_nq, ct.default_dim)
search_params = {"metric_type": "L2", "params": {"ef": 64}}
search_res, _ = collection_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
param=search_params, limit=ct.default_limit, expr="int64 >= 0")
assert len(search_res) == ct.default_nq
assert len(search_res[0]) == ct.default_limit
# query and delete
term_expr = f'{ct.default_int64_field_name} < 100'
query_res, _ = collection_w.query(term_expr)
assert len(query_res) == 100
delete_expr = f'{ct.default_int64_field_name} in {[i for i in range(100)]}'
collection_w.delete(delete_expr)
collection_w.query(term_expr, check_task=ct.CheckTasks.check_query_empty)
# verify rg replica info
for rg_info in resource_group_info:
rg_info = {"name": rg_info["name"],
"capacity": rg_info["capacity"],
"num_available_node": rg_info["available_node"],
"num_loaded_replica": rg_info["loaded_replica"],
"num_outgoing_node": {},
"num_incoming_node": {}
}
desc_rg_info_2, _ = self.utility_wrap.describe_resource_group(name=rg_info["name"],
check_task=ct.CheckTasks.check_rg_property,
check_items=rg_info)
log.info(f'[ResourceGroup] Rg of {rg_info["name"]} info is: {desc_rg_info_2}')
@pytest.mark.tags(CaseLabel.L3)
def test_verify_milvus_resource_group(self):
self._connect()
# verify collection exist
all_collections, _ = self.utility_wrap.list_collections()
assert all(coll_name in all_collections for coll_name in [coll_name_1, coll_name_2])
# verify resource groups
for rg_info in resource_group_info:
rg_info = {"name": rg_info["name"],
"capacity": rg_info["capacity"],
"num_available_node": rg_info["available_node"],
"num_loaded_replica": rg_info["loaded_replica"],
"num_outgoing_node": {},
"num_incoming_node": {}
}
desc_rg_info, _ = self.utility_wrap.describe_resource_group(name=rg_info["name"],
check_task=ct.CheckTasks.check_rg_property,
check_items=rg_info)
log.info(f'[ResourceGroup] Rg of {rg_info["name"]} info is: {desc_rg_info}')
# search
for coll_name in coll_name_2, coll_name_1:
# get query segment info
segment, _ = self.utility_wrap.get_query_segment_info(coll_name)
log.info(f"{coll_name} query segment info: {segment}")
# get replicas
collection_w = self.init_collection_wrap(name=coll_name, active_trace=True)
replicas, _ = collection_w.get_replicas(check_task=ct.CheckTasks.check_nothing)
log.info(f"{coll_name} replicas: {replicas}")
# search
for i in range(100):
search_vectors = cf.gen_vectors(ct.default_nq, ct.default_dim)
search_params = {"metric_type": "L2", "params": {"ef": 64}}
search_res, _ = collection_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
param=search_params, limit=ct.default_limit, expr="int64 >= 0")
assert len(search_res) == ct.default_nq
assert len(search_res[0]) == ct.default_limit
# show query segment info finally
segment_2, _ = self.utility_wrap.get_query_segment_info(coll_name)
log.info(f"{coll_name} query segment info: {segment_2}")
# show replicas finally
replicas_2, _ = collection_w.get_replicas()
log.info(f"{coll_name} replicas: {replicas_2}")
| milvus-io/milvus | tests/python_client/chaos/testcases/test_chaos_resource_group.py | test_chaos_resource_group.py | py | 9,886 | python | en | code | 24,190 | github-code | 6 |

39865467891
|
from IPython import get_ipython
def type_of_script():
"""
Detects and returns the type of python kernel
:return: string 'jupyter' or 'ipython' or 'terminal'
"""
    try:
        ipy_str = str(type(get_ipython()))
        if 'zmqshell' in ipy_str:
            return 'jupyter'
        if 'terminal' in ipy_str:
            return 'ipython'
    except Exception:
        pass
    return 'terminal'
if type_of_script() == 'jupyter':
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
import matplotlib.pyplot as plt # type: module
import matplotlib.ticker as ticker
from matplotlib import colormaps
from matplotlib.colors import Normalize
import matplotlib.gridspec as gridspec
import numpy as np
import os, glob
import time
import warnings
from rur.fortranfile import FortranFile
from rur import uri, uhmi, painter, drawer
from rur.sci.photometry import measure_luminosity
from rur.sci.geometry import get_angles, euler_angle
from rur.utool import rotate_data
from scipy.ndimage import gaussian_filter
uri.timer.verbose=1
# from rur.sci.kinematics import f_getpot
from icl_IO import mode2repo, pklsave, pklload
from icl_tool import *
from icl_numba import large_isin, large_isind, isin
from icl_draw import drawsnap, add_scalebar, addtext, MakeSub_nolabel, label_to_in, fancy_axis, circle
import argparse, subprocess
from importlib import reload
import cmasher as cmr
from copy import deepcopy
from multiprocessing import Pool, shared_memory
mode = 'nh'
iout = 1026
repo, rurmode, dp = mode2repo(mode)
snap = uri.RamsesSnapshot(repo, iout, mode=rurmode)
snaps = uri.TimeSeries(snap)
snaps.read_iout_avail()
nout = snaps.iout_avail['iout']
gals = uhmi.HaloMaker.load(snap, galaxy=True, double_precision=dp)
hals = uhmi.HaloMaker.load(snap, galaxy=False, double_precision=dp)
database = f"/home/jeon/MissingSat/database"
from common_func import *
tree = pklload(f"{database}/02_main_progenitors.pickle")
if(os.path.exists(f"{database}/halo_dict.pickle")):
halos = pklload(f"{database}/halo_dict.pickle")
else:
halos = {'catalog':{}, 'index':{}}
uri.timer.verbose=0
for iout in tqdm(np.unique(tree['timestep'])):
isnap = snaps.get_snap(iout)
ihals = uhmi.HaloMaker.load(isnap, galaxy=False, double_precision=dp)
indicies = np.zeros(len(ihals), dtype=int)
iids = tree[tree['timestep'] == iout]['id']
ihals = ihals[iids-1]
indicies[iids-1] = np.arange(len(iids))
halos['catalog'][iout] = ihals
halos['index'][iout] = indicies
pklsave(halos, f"{database}/halo_dict.pickle")
def _ibox(h, factor=1):
return np.array([
[h['x']-factor*h['r'], h['x']+factor*h['r']],
[h['y']-factor*h['r'], h['y']+factor*h['r']],
[h['z']-factor*h['r'], h['z']+factor*h['r']]
])
uri.timer.verbose=0
for iout in np.unique(tree['timestep'])[::-1]:
if(os.path.exists(f"{database}/main_prog/cpulist/cpulist_{iout:05d}.pickle")): continue
cpudict = {}
targets = halos['catalog'][iout]
isnap = snaps.get_snap(iout)
cpulists = []
with Pool(32) as pool:
async_result = [
pool.apply_async(
uri.get_cpulist,
(_ibox(h,factor=1.1), None, isnap.levelmax, isnap.bound_key, isnap.ndim, 5, isnap.ncpu)
) for h in targets
]
iterobj = tqdm(async_result, total=len(targets), desc=f"iout={iout:05d}")
for r in iterobj:
cpulists.append(r.get())
cpulists = np.unique( np.concatenate(cpulists) )
cpudict['all'] = cpulists
pklsave(cpudict, f"{database}/main_prog/cpulist/cpulist_{iout:05d}.pickle")
print(f"`{database}/main_prog/cpulist/cpulist_{iout:05d}.pickle` save done")
isnap.clear()
| syj3514/MissingSat | befo231205/05b_get_involved_cpu.py | 05b_get_involved_cpu.py | py | 3,847 | python | en | code | 0 | github-code | 6 |

20841031996
|
from sklearn.model_selection import train_test_split
from sklearn import svm
def svm_classification(X, y, C_in, gamma_in, kernel_in):
    # 'shuffle=y' in the original was almost certainly intended as 'stratify=y'
    # (train_test_split's shuffle expects a bool); stratifying keeps class ratios in both splits
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)
    classifierSVM = svm.SVC(C=C_in, degree=2, gamma=gamma_in, kernel=kernel_in)
    # training
    classifierSVM.fit(X_train, y_train)
    # predict on the test data
    y_pred_SVM = classifierSVM.predict(X_test)
# return X_train, X_test, y_train, y_test, classifierSVM, y_pred_SVM
return classifierSVM, y_pred_SVM, y_test
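# Usage sketch (hypothetical data; 'scale' is a valid sklearn gamma value):
#   clf, y_pred, y_true = svm_classification(X, y, C_in=1.0, gamma_in='scale', kernel_in='rbf')
#   accuracy = (y_pred == y_true).mean()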
| mfaisalafandi/identification_teks_ulasan_svm | Klasifikasi.py | Klasifikasi.py | py | 566 | python | en | code | 0 | github-code | 6 |

71474372027
|
# Rotating or flipping an image
from PIL import Image
def main():
image = Image.open('../lenna.png')
image.show('Original')
# Rotate 60 degrees counter clockwise
rotated_image = image.rotate(60)
rotated_image.show('Rotate 60')
# Rotate using Image.transpose
# Transpose supports these values:
# - Image.FLIP_LEFT_RIGHT
# - Image.FLIP_TOP_BOTTOM
# - Image.ROTATE_90
# - Image.ROTATE_180
# - Image.ROTATE_270
rotated_image = image.transpose(Image.ROTATE_90)
rotated_image.show('Rotate 90')
# Flip horizontal
flipped_image = image.transpose(Image.FLIP_LEFT_RIGHT)
flipped_image.show('Flip horizontal')
# Flip vertical
flipped_image = image.transpose(Image.FLIP_TOP_BOTTOM)
flipped_image.show('Flip vertical')
if __name__ == '__main__':
main()
| gkostadinov/py-pil-imageprocessing | 1-transformations/2.rotate.py | 2.rotate.py | py | 837 | python | en | code | 5 | github-code | 6 |

9411671299
|
from django.urls import path
from . import views as blog_views
# import users.views as user_views
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView
)
urlpatterns = [
path('', PostListView.as_view(), name='blog-home'),
path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),
# using a variable in the route (for individual posts, which will be numbered)
# the detail view is expecting the "pk" variable (we could change this in the class)
path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
path('post/<int:pk>/update', PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete', PostDeleteView.as_view(), name='post-delete'),
path('post/new/', PostCreateView.as_view(), name='post-create'),
path('about/', blog_views.about, name='blog-about'),
]
| Coniferish/djangoTutorial | blog/urls.py | urls.py | py | 935 | python | en | code | 0 | github-code | 6 |

35226084140
|
from __future__ import division
# Our Backend for the App!
# Built with Flask
# Import Flask
import flask
import requests
import os
from flask import send_file
import re
import sys
# Create the application
app = flask.Flask(__name__)
# serving home.html
@app.route('/', methods=['GET'])
def serve_page():
return flask.render_template('home.html')
# process query
@app.route('/process_query', methods=['POST'])
def process_query():
data = flask.request.form # is a dictionary
input = data['user_input']
input_in_list = input.split(' ')
return flask.render_template('home.html', same=processInput(input_in_list), og=input)
# Map spoken keywords to their sign images; a later keyword match overrides an
# earlier one, preserving the order of the original if-chain.
KEYWORD_IMAGES = [
    ("bye", "static/bye.jpg"),
    ("hello", "static/hello.png"),
    ("yes", "static/yes.png"),
    ("no", "static/no.png"),
    ("please", "static/please.png"),
    ("thanks", "static/thanks.png"),
    ("who", "static/who.png"),
    ("what", "static/what.png"),
    ("when", "static/when.png"),
    ("where", "static/where.png"),
    ("why", "static/why.png"),
    ("which", "static/which.png"),
    ("how", "static/how.png"),
]
def processInput(input_in_list):
    for s, word in enumerate(input_in_list):
        for keyword, image in KEYWORD_IMAGES:
            if keyword in word.lower():
                input_in_list[s] = image
    return input_in_list
def listen_print_loop(responses):
"""Iterates through server responses and prints them.
The responses passed is a generator that will block until a response
is provided by the server.
Each response may contain multiple results, and each result may contain
multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we
print only the transcription for the top alternative of the top result.
In this case, responses are provided for interim results as well. If the
response is an interim one, print a line feed at the end of it, to allow
the next result to overwrite it, until the response is a final one. For the
final one, print a newline to preserve the finalized transcription.
"""
num_chars_printed = 0
for response in responses:
if not response.results:
continue
# The `results` list is consecutive. For streaming, we only care about
# the first result being considered, since once it's `is_final`, it
# moves on to considering the next utterance.
result = response.results[0]
if not result.alternatives:
continue
# Display the transcription of the top alternative.
transcript = result.alternatives[0].transcript
# Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
#
# If the previous result was longer than this one, we need to print
# some extra spaces to overwrite the previous result
overwrite_chars = ' ' * (num_chars_printed - len(transcript))
if not result.is_final:
sys.stdout.write(transcript + overwrite_chars + '\r')
sys.stdout.flush()
num_chars_printed = len(transcript)
        else:
            # Check for an exit keyword before returning the final transcript
            # (in the original, this check sat after the return and never ran).
            if re.search(r'\b(exit|quit)\b', transcript, re.I):
                print('Exiting..')
                break
            return flask.render_template('home.html', same=processInput("".join(transcript).split(" ")), og="".join(transcript))
@app.route('/speech', methods=['GET'])
def main():
    # See http://g.co/cloud/speech/docs/languages
    # for a list of supported languages.
    language_code = 'en-US'  # a BCP-47 language tag
    # The streaming-recognition pipeline was never wired in here; return the home
    # page so this route does not crash (a Flask view must return a response).
    return flask.render_template('home.html')
if __name__ == '__main__':
app.run(debug=True)
|
manichandra95151/TTSL
|
main.py
|
main.py
|
py
| 4,265 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24312665302
|
from langdetect import detect
def to_sentences(text):
text = text.replace("\n", " ")
sentences = [s + '.' for s in text.split(".") if s != ""]
return sentences
def divide(text, input_size=5000):
"""
Divide text into chunks of input_size
Args:
text (str): Text to be divided
input_size (int): Size of each chunk
"""
    # use a shorter input_size for CJK languages (denser scripts)
lang = detect(text)
if lang in ['ko', 'ja', 'zh-cn', 'zh-tw', 'zh-hk']:
input_size = min(input_size, 1300)
    # split the text into sentences and pack them into chunks
    sentences = to_sentences(text)
    result = []
    temp = ""
    for sentence in sentences:
        if temp and len(temp + sentence) >= input_size:
            result.append(temp)
            temp = ""
        temp += sentence
    if temp:  # guard against appending an empty trailing chunk
        result.append(temp)
    return result
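
# --- Illustrative usage sketch (added; not part of the original module). langdetect
# --- needs a reasonable amount of text to identify the language reliably.
if __name__ == "__main__":
    sample = "This is the first sentence. Here is the second one. " * 40
    chunks = divide(sample, input_size=300)
    print([len(c) for c in chunks])  # chunks stay near or under 300 characters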
|
hyunooss/SSUmmary
|
django-server/ssummary_site/modules/utils.py
|
utils.py
|
py
| 788 |
python
|
en
|
code
| 0 |
github-code
|
6
|
45375013026
|
"""
include packages
"""
from settings import *
import sqlite3
import discord
from discord import app_commands
import sys
import signal
import deepl
from typing import Optional
from lib import vote as vt
from lib import deepl as dl
"""
Global variables
"""
connection : sqlite3.Connection = sqlite3.connect(DATABASE)
intents : discord.Intents = discord.Intents.all()
client : discord.Client = discord.Client(intents=intents)
tree : app_commands.CommandTree = app_commands.CommandTree(client=client)
"""
Setup
"""
vt.init(connection)
with open(DEEPL_API_KEY) as f:
dl_translator = dl.LoggingTranslator(f.read(), connection=connection)
"""
Commands
"""
@tree.command(
name='test',
description='This is a test'
)
@app_commands.describe(
message='Your message',
hello='Hello message'
)
@app_commands.rename(
message='text'
)
@app_commands.choices(
hello=[
app_commands.Choice(name='Good Morning', value='Good Morning'),
app_commands.Choice(name='Good Afternoon', value='Good Afternoon'),
app_commands.Choice(name='Good Evening', value='Good Evening'),
app_commands.Choice(name='Good Night', value='Good Night')
]
)
@app_commands.guild_only
async def test(ctx: discord.Interaction, message: str, hello: str):
await ctx.response.send_message('This is a test message.\nYour message is ...\n'+message+'\n'+hello)
@tree.command(
name='vote',
    description='Run a vote'
)
@app_commands.describe(
    title='Title of the vote',
    visible='Whether to show who voted when the results are displayed',
)
@app_commands.choices(
visible=[
        app_commands.Choice(name='Show', value='Yes'),
        app_commands.Choice(name='Hide', value='No')
]
)
@app_commands.guild_only
async def vote_with_any_choices(ctx: discord.Interaction, title: str, visible: str='Yes'):
try:
await ctx.response.send_modal(vt.VoteModal(title=title, visible=visible))
except Exception as e:
print(e.with_traceback(sys.exc_info()[2]))
@tree.command(
name='deepl',
    description='Translate text with DeepL (default: auto-detect -> Japanese)'
)
@app_commands.describe(
    text='Text to translate',
    source_language='Source language (default: auto-detect)',
    target_language='Target language (default: Japanese)'
)
@app_commands.choices(
source_language=dl.DcLanguageList.SOURCE,
target_language=dl.DcLanguageList.TARGET
)
async def deepl_translate(ctx: discord.Interaction, text: str, source_language: Optional[str] = None, target_language: str = deepl.Language.JAPANESE):
try:
if source_language == "":
source_language = None
translated_text = dl_translator.translate_text(
ctx=ctx,
text=text,
source_lang=source_language,
target_lang=target_language
)
t = "> " + text.replace("\n", "\n> ") + "\n"
await ctx.response.send_message(t + translated_text.text)
except Exception as e:
print(e.with_traceback(sys.exc_info()[2]))
"""
Events
"""
@client.event
async def on_ready():
print('Bot is ready')
await tree.sync()
"""
Cleanups
"""
def cleanup():
global connection
connection.close()
def signal_handler(signum, frame):
cleanup()
sys.exit(1)
if __name__ == '__main__':
signal.signal(signal.SIGTERM, signal_handler)
try:
with open(TOKEN) as f:
            client.run(f.read().strip())  # strip any trailing newline from the token file
finally:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
cleanup()
|
GrapeJuicer/GrapeBot
|
app/main.py
|
main.py
|
py
| 3,637 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22807898242
|
# coding: utf8
"""้Lock ็จไบ้ฟๅ
่ฟ็จ้ดๅฏนshared memory็ไบๅคบ"""
import multiprocessing
import time
def job(val, num, lo):
    lo.acquire()  # acquire the lock
for _ in range(10):
time.sleep(0.1)
val.value += num
print(val.value)
    lo.release()  # release the lock
def multicore():
    lo = multiprocessing.Lock()  # create the lock object
    share_memory = multiprocessing.Value("i", 0)  # a shared int initialized to 0
res1 = multiprocessing.Process(target=job, args=(share_memory, 1, lo))
res2 = multiprocessing.Process(target=job, args=(share_memory, 9, lo))
res1.start()
res2.start()
res1.join()
res2.join()
if __name__ == "__main__":
multicore()
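
# Expected behavior (added note): because both workers take the same lock, the
# prints are serialized -- one job finishes all ten increments before the other
# starts; without the lock the two processes would interleave their updates.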
|
sola1121/practice_code
|
python3/ๅฏนไบๅผๆญฅ็ไพๅญ/multiprocessing/6 multiprocessing lock้.py
|
6 multiprocessing lock้.py
|
py
| 752 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26552257079
|
#!/usr/bin/env python3
#######################
# ACE3 Setup Script #
#######################
import os
import sys
import shutil
import platform
import subprocess
import winreg
######## GLOBALS #########
MAINDIR = "z"
PROJECTDIR = "ace"
CBA = "P:\\x\\cba"
##########################
def main():
FULLDIR = "{}\\{}".format(MAINDIR, PROJECTDIR)
print("""
######################################
# ACE3 Development Environment Setup #
######################################
This script will create your ACE3 dev environment for you.
Before you run this, you should already have:
- The Arma 3 Tools installed properly via Steam
- A properly set up P-drive
If you have not done those things yet, please abort this script in the next step and do so first.
This script will create two hard links on your system, both pointing to your ACE3 project folder:
[Arma 3 installation directory]\\{} => ACE3 project folder
P:\\{} => ACE3 project folder
It will also copy the required CBA includes to {}, if you do not have the CBA source code already.""".format(FULLDIR, FULLDIR, CBA))
print("\n")
try:
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
key = winreg.OpenKey(reg,
r"SOFTWARE\Wow6432Node\bohemia interactive\arma 3")
armapath = winreg.EnumValue(key, 1)[1]
    except OSError:  # winreg raises OSError when the key is missing
print("Failed to determine Arma 3 Path.")
return 1
if not os.path.exists("P:\\"):
print("No P-drive detected.")
return 2
scriptpath = os.path.realpath(__file__)
projectpath = os.path.dirname(os.path.dirname(scriptpath))
print("# Detected Paths:")
print(" Arma Path: {}".format(armapath))
print(" Project Path: {}".format(projectpath))
repl = input("\nAre these correct? (y/n): ")
if repl.lower() != "y":
return 3
    hemtt_path = os.path.join(projectpath, ".hemttout", "dev")
    print("\n# Use HEMTT dev path for Arma file patching:")
    print(f"  y: {hemtt_path}")
    print(f"  n: {projectpath}")
    repl = input("(y/n): ")
    filepatching_path = projectpath
    if repl.lower() == "y":
        if not os.path.exists(hemtt_path):
            print(f"creating {hemtt_path}")
            os.makedirs(hemtt_path)
        filepatching_path = hemtt_path
if os.path.exists("P:\\{}\\{}".format(MAINDIR, PROJECTDIR)):
print("Link on P: already exists. Please finish the setup manually.")
return 4
if os.path.exists(os.path.join(armapath, MAINDIR, PROJECTDIR)):
print("Link in Arma directory already exists. Please finish the setup manually.")
return 5
print("\n# Creating links ...")
try:
if not os.path.exists("P:\\{}".format(MAINDIR)):
os.mkdir("P:\\{}".format(MAINDIR))
if not os.path.exists(os.path.join(armapath, MAINDIR)):
os.mkdir(os.path.join(armapath, MAINDIR))
subprocess.call(["cmd", "/c", "mklink", "/J", "P:\\{}\\{}".format(MAINDIR, PROJECTDIR), projectpath])
subprocess.call(["cmd", "/c", "mklink", "/J", os.path.join(armapath, MAINDIR, PROJECTDIR), filepatching_path])
    except OSError:
        print("Something went wrong during the link creation. Please finish the setup manually.")
        return 6
print("# Links created successfully.")
print("\n# Copying required CBA includes ...")
if os.path.exists(CBA):
print("{} already exists, skipping.".format(CBA))
return -1
try:
shutil.copytree(os.path.join(projectpath, "include", "x", "cba"), CBA)
    except (OSError, shutil.Error):
        print("Something went wrong while copying CBA includes. Please copy include\\x\\cba to {} manually.".format(CBA))
        return 7
print("# CBA includes copied successfully to {}.".format(CBA))
return 0
if __name__ == "__main__":
exitcode = main()
if exitcode > 0:
print("\nSomething went wrong during the setup. Make sure you run this script as administrator. If these issues persist, please follow the instructions on the ACE3 wiki to perform the setup manually.")
else:
print("\nSetup successfully completed.")
input("\nPress enter to exit ...")
sys.exit(exitcode)
|
acemod/ACE3
|
tools/setup.py
|
setup.py
|
py
| 4,272 |
python
|
en
|
code
| 966 |
github-code
|
6
|
29074159051
|
from RepSys import Error, config
from RepSys.util import execcmd
from RepSys.VCS import *
from os.path import basename, dirname
from os import chdir, getcwd
import sys
import re
import time
from xml.etree import cElementTree as ElementTree
import subprocess
class GITLogEntry(VCSLogEntry):
def __init__(self, revision, author, date):
        VCSLogEntry.__init__(self, revision, author, date)  # was `data`, an undefined name
class GIT(VCS):
def __init__(self):
VCS.__init__(self)
self.vcs_name = "git"
self.vcs_command = config.get("global", "git-command", "git")
self.vcs_supports['clone'] = True
self.env_defaults = {"GIT_SSH": self.vcs_wrapper}
def clone(self, url, targetpath, **kwargs):
if url.split(':')[0].find("svn") < 0:
return VCS.clone(self, url, targetpath, **kwargs)
else:
# To speed things up on huge repositories, we'll just grab all the
# revision numbers for this specific directory and grab these only
            # instead of having to go through each and every revision...
retval, result = execcmd("svn log --stop-on-copy --xml %s" % url)
if retval:
return retval
parser = ElementTree.XMLTreeBuilder()
result = "".join(result.split("\n"))
parser.feed(result)
log = parser.close()
logentries = log.getiterator("logentry")
revisions = []
topurl = dirname(url)
trunk = basename(url)
tags = "releases"
execcmd("git svn init %s --trunk=%s --tags=%s %s" % (topurl, trunk, tags, targetpath), show=True)
chdir(targetpath)
for entry in logentries:
revisions.append(entry.attrib["revision"])
while revisions:
execcmd("git svn fetch -r%d" % int(revisions.pop()), show=True)
cmd = ["svn", "rebase"]
return self._execVcs_success(*cmd, **kwargs)
class SVNLook(VCSLook):
def __init__(self, repospath, txn=None, rev=None):
VCSLook.__init__(self, repospath, txn, rev)
# vim:et:ts=4:sw=4
|
mdkcauldron/proyvinds-repsys
|
RepSys/git.py
|
git.py
|
py
| 2,133 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44083632675
|
import gzip
import inspect
import os
from io import StringIO
from typing import Optional, List
import numpy as np
import pandas as pd
def time_map(time_a: float, time_b: float, packet_a: int, packet_b: int, time_c: float, window_tolerance: int = 0) -> \
        Optional[float]:
    """
    Map an API time onto a packet number.
    This function was done in order to have a nice visualisation.
    The window tolerance is used to capture nearby calls
    =========     |
    ta     tb     tc=>pc
    pa     pb
    Args:
        time_a (float): time of the beginning of the flow (in seconds since epoch, time.time())
        time_b (float): time of the ending of the flow
        packet_a (int): packet number of the beginning of the flow
        packet_b (int): packet number of the ending of the flow
        time_c (float): time of the api call
        window_tolerance (int): time shift in seconds in which api calls are still considered to belong to the flow
    Returns:
        float: packet number, returns None if the mapping fails
    """
    window_tolerance *= 1000000  # seconds -> the microsecond timestamps used elsewhere
# Check if inside flow
if time_a <= time_c <= time_b: # Chain comparaison
return packet_a + (time_c - time_a) * (packet_b - packet_a) / (time_b - time_a)
# Check if in the window border (simple)
if time_a - window_tolerance <= time_c <= time_b:
return packet_a
"""if time_a <= time_c <= time_b + window_tolerance:
return packet_b"""
# Outside
return None
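
# Worked example (added for illustration): a flow spanning times 0..10 mapped onto
# packets 100..200; an API call at t=5 lands on packet 150 by linear interpolation:
#   time_map(0.0, 10.0, 100, 200, 5)  ->  150.0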
def pure_time_map(time_a: float, time_b: float, time_c: float, window_tolerance: int = 0) -> Optional[float]:
    """
    Map an API time into the flow's time span.
    This function was done in order to have a nice visualisation.
    The window tolerance is used to capture nearby calls
    =========     |
    ta     tb     tc=>pc
    Args:
        time_a (float): time of the beginning of the flow (in seconds since epoch, time.time())
        time_b (float): time of the ending of the flow
        time_c (float): time of the api call
        window_tolerance (int): time shift in seconds in which api calls are still considered to belong to the flow
    Returns:
        float: time, returns None if the mapping fails
    """
    window_tolerance *= 1000000  # seconds -> microseconds
# Check if inside flow
if time_a <= time_c <= time_b: # Chain comparaison
return time_c
# Check if in the window border (simple)
if time_a - window_tolerance <= time_c <= time_b:
return time_a
"""if time_a <= time_c <= time_b + window_tolerance:
return time_b"""
# Outside
return None
def get_child_pids(current_pid: int, diff: pd.DataFrame) -> List:
"""
Get the child process pid of one process given its pid
Args:
current_pid (int): parent pid
diff (pd dataframe): df recorded
Returns:
list: list of pids
"""
childs = diff[diff["parent_pid"] == current_pid]
if childs.shape[0] == 0:
return [current_pid]
return [current_pid] + [v for index, row in childs.iterrows() for v in get_child_pids(row["process_id"], diff)]
def get_malware_pids(malware_name: str = "2020-09-30-Trickbot-EXE.exe", path: str = "./") -> List:
"""
Get the pids of all the malware generated processes
Args:
malware_name (str, optional): name of the malware. Defaults to "2020-09-08-Trickbot-EXE-gtag-ono72.exe".
Returns:
list: list of pids
path: path of the malware
"""
first = pd.read_csv(path + "process_pre.csv")
post = pd.read_csv(path + "process_post.csv")
first.drop(first.columns[0], axis=1, inplace=True)
post.drop(post.columns[0], axis=1, inplace=True)
diff = first.merge(post, indicator=True,
how='right').loc[lambda x: x['_merge'] != 'both']
try:
malware_pid = int(diff.loc[diff['process_name'] == malware_name]["process_id"].astype(float))
except TypeError:
raise RuntimeError('Malware PID not found, check malware name')
return get_child_pids(malware_pid, diff)
def gzip_to_string(file_path: str) -> str:
"""
Open a gzip file and load the content in a string
    This function exists because the gzip may not be properly closed.
In this case, the end is corrupted but the rest can be read.
Args:
file_path (string): path of the gzip file
Returns:
str: content of the gzip file
"""
    gzip_file = gzip.open(file_path, "rt")
    string = ""
    while True:
        try:
            line = gzip_file.readline()
        except EOFError:
            # truncated/corrupted stream: keep whatever could be read
            break
        if not line:  # normal end of file (the original looped forever here)
            break
        string += line
    return string
def get_malware_traces(path: str = "./") -> List:
"""
Get a list of dataframe representing the frida trace
Its current format is [time, api_name, category]
Returns:
list: list of dataframe
"""
pids = list(set(get_malware_pids(path=path)))
traces = []
for pid in pids:
        if not os.path.isfile(f"{path}frida_{pid}.txt.gz"):
            print(f"Trace for {pid} does not exist")  # was missing the f-prefix
            continue  # skip missing traces instead of crashing on open
header = ["time", "api", "category"]
frida_str = StringIO(gzip_to_string(f"{path}frida_{pid}.txt.gz"))
dataframe = pd.read_csv(frida_str, names=header)
dataframe.drop(
dataframe.loc[dataframe['api'] == 'error'].index, inplace=True)
dataframe.drop_duplicates(
subset=['time', 'api'], keep='first', inplace=True)
dataframe.reset_index(drop=True, inplace=True)
traces.append(dataframe)
dataframe["time_int"] = (dataframe["time"] * 1000000).astype(int)
return traces
class Singleton(type):
# Singleton modified to handle arguments (singleton for each argument set)
_instances = {}
_init = {}
def __init__(cls, name, bases, dct):
cls._init[cls] = dct.get('__init__', None)
def __call__(cls, *args, **kwargs):
init = cls._init[cls]
if init is not None:
key = (cls, frozenset(inspect.getcallargs(init, None, *args, **kwargs).items()))
else:
key = cls
if key not in cls._instances:
cls._instances[key] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[key]
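
# --- Illustrative sketch (added; `_DemoConfig` is made up, not from this file): the
# --- metaclass caches one instance per distinct argument set, so:
#
#   class _DemoConfig(metaclass=Singleton):
#       def __init__(self, name):
#           self.name = name
#
#   _DemoConfig("a") is _DemoConfig("a")   -> True  (same args, cached instance)
#   _DemoConfig("a") is _DemoConfig("b")   -> False (different args, new instance)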
class MalwareTraceExtractor(metaclass=Singleton):
def __init__(self, malware_name: str, path: str):
self.malware_name = malware_name
self.path = path
self.trace_array = None
self._get_trace()
def _get_trace(self) -> None:
pids = list(set(get_malware_pids(self.malware_name, path=self.path)))
return_array = np.empty((0, 4))
for pid in pids:
if not os.path.isfile(f"{self.path}frida_{pid}.txt.gz"):
# print(f"Trace for {pid} does not exist")
continue
header = ["time", "api", "category"]
frida_str = StringIO(gzip_to_string(f"{self.path}frida_{pid}.txt.gz"))
dataframe = pd.read_csv(frida_str, names=header)
dataframe.drop(
dataframe.loc[dataframe['api'] == 'error'].index, inplace=True)
dataframe.drop_duplicates(
subset=['time', 'api'], keep='first', inplace=True)
dataframe.reset_index(drop=True, inplace=True)
dataframe["time_int"] = (dataframe["time"] * 1000000).astype(np.int64)
# To numpy
np_array_df = dataframe.to_numpy()
# Stack to the final array
return_array = np.vstack((return_array, np_array_df))
self.trace_array = return_array
print(len(self.trace_array))
def get_merge_trace(self) -> np.ndarray:
return self.trace_array
def get_segmented_flow_syscalls(segmented_flow: np.ndarray, malware_process_name: str, path: str = "./",
time_delay_allowed: int = 0) -> np.ndarray:
"""
Get a list of API calls corresponding to the segmented flow
:param segmented_flow:
:param malware_process_name:
:param path:
:param time_delay_allowed:
:return:
"""
min_time, max_time = segmented_flow[0][2]
for group in segmented_flow[1:]:
# ['HANDSHAKE', [0, 2], [1612708961378936, 1612708961422139]]
timea, timeb = group[2]
min_time = min(timea, min_time)
max_time = max(timeb, max_time)
# print(min_time, max_time, max_time - min_time)
trace_extractor = MalwareTraceExtractor(malware_name=malware_process_name, path=path)
calls = trace_extractor.get_merge_trace()
# calls = get_malware_traces_merged(malware_process_name, path=path)
returned_calls = np.empty((0, 4))
for call in calls:
mapping = pure_time_map(min_time, max_time, call[3], time_delay_allowed)
if mapping is not None:
returned_calls = np.vstack((returned_calls, call))
return returned_calls
if __name__ == "__main__":
PATH = "trickbot1_1/"
|
llmhyy/malware-traffic
|
Experiments/exp16_visualisation/api_extraction.py
|
api_extraction.py
|
py
| 7,970 |
python
|
en
|
code
| 7 |
github-code
|
6
|
33360864969
|
# dicesimulation.py
# The following code computes the exact probability distribution
# for the sum of two dice:
# probabilities = stdarray.create1D(13, 0.0)
# for i in range(1, 7):
# for j in range(1, 7):
# probabilities[i+j] += 1.0
# for k in range(2, 13):
# probabilities[k] /= 36.0
# After this code completes, probabilities[k] is the probability
# that the dice sum to k. Run experiments to validate this calculation
# simulating n dice throws, keeping track of the frequencies of
# occurrence of each value when you compute the sum of two random
# integers between 1 and 6. How large does n have to be before your
# empirical results match the exact results to three decimal places?
# ans: n must be around 10_000_000 {python3 dicesimulation.py 10000000}
import sys
import random
import stdio
import stdarray
n = int(sys.argv[1])
theoretical_probabilities = stdarray.create1D(13, 0.0)
for i in range(1, 7):
for j in range(1, 7):
theoretical_probabilities[i+j] += 1.0
for k in range(2, 13):
theoretical_probabilities[k] /= 36.0
# simulate dice throw
dice_sum = stdarray.create1D(13, 0.0)
for i in range(0, n):
first_die = random.randint(1, 6)
second_die = random.randint(1, 6)
dice_sum[first_die + second_die] += 1
empirical_probabilities = stdarray.create1D(13, 0.0)
for i in range(0, len(empirical_probabilities)):
empirical_probabilities[i] = dice_sum[i] / sum(dice_sum)
diff = stdarray.create1D(13, 0.0)
stdio.writeln('sum theo empir diff')
for i in range(2, 13):
theoretical_probabilities[i] = round(theoretical_probabilities[i], 3)
empirical_probabilities[i] = round(empirical_probabilities[i], 3)
diff[i] = round(theoretical_probabilities[i] - empirical_probabilities[i], 3)
if len(str(i)) < 2:
stdio.write(' ')
stdio.writeln(str(i) + ' ' + str(theoretical_probabilities[i]) + ' ' + str(empirical_probabilities[i]) + ' ' + str(diff[i]))
|
positronn/ippaida
|
chapter01/arrays/dicesimulation.py
|
dicesimulation.py
|
py
| 1,940 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71119888509
|
import matplotlib.pyplot as plt
import numpy as np
# ~~~ DEFINE DATA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
np.random.seed(1337)
n = 1000000
x = np.random.standard_normal(n)
y = x + .5 * np.random.standard_normal(n)
hist, xedges, yedges = np.histogram2d(x, y, bins=100, density=True)
hist[hist == 0] = None
t = np.linspace(0, 3 * np.pi, 1000)
style = 'mpl'
# ~~~ PLOT LINEAR ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
fig, ax = plt.subplots()
plt.plot(t, np.sin(t), t, np.cos(t), t, 2 * np.cos(t))
plt.tight_layout()
plt.savefig(f'gallery/{style}_plot.png')
plt.close()
# legend
fig, ax = plt.subplots()
plt.plot(t, np.sin(t), label='sin')
plt.plot(t, np.cos(t), label='cos')
plt.plot(t, 2 * np.cos(t), label='2cos')
plt.legend(title='function:')
plt.tight_layout()
plt.savefig(f'gallery/{style}_plot_legend.png')
plt.close()
# multiple subplots
fig, axs = plt.subplots(3, 1, sharex=True, gridspec_kw={'hspace': 0.000})
axs[0].plot(t, np.sin(t))
axs[1].plot(t[::20], np.cos(t[::20]), 'o-')
axs[2].plot(t, 2 * np.cos(t), t, np.sin(t))
plt.tight_layout()
plt.savefig(f'gallery/{style}_plot_multiple.png')
plt.close()
# ~~~ PLOT IMSHOW ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
fig, ax = plt.subplots()
plt.imshow(hist)
plt.tight_layout()
plt.savefig(f'gallery/{style}_imshow.png')
plt.close()
# cbar
fig, ax = plt.subplots()
im = plt.imshow(hist)
plt.colorbar(im)
plt.tight_layout()
plt.savefig(f'gallery/{style}_imshow_cbar.png')
plt.close()
|
braniii/prettypyplot
|
gallery/comparison_mpl.py
|
comparison_mpl.py
|
py
| 1,505 |
python
|
en
|
code
| 4 |
github-code
|
6
|
24091654897
|
from Film import Film
from Forgalmazo import Forgalmazo
import datetime
def fajl_beolvas():
filmek = []
fp = open('nyitohetvege.txt', 'r', encoding='utf-8')
lines = fp.readlines()
fp.close()
for line in lines[1:]:
n_line = line.rstrip()
(eredetiCim, magyarCim,bemutato,forgalmazo,
bevel,latogato) = n_line.split(';')
film = Film(eredetiCim, magyarCim,bemutato,forgalmazo,
bevel,latogato)
filmek.append(film)
return filmek
def feladat3(filmek):
    print('Task 3: Number of films in the file: ', end='')
    filmek_szama = len(filmek)
    print(filmek_szama, 'pcs')
def feladat4(filmek):
    print('Task 4: Total week-1 revenue of the ', end='')
    print('UIP Duna Film distributor: ', end='')
osszeg=0
for film in filmek:
if film.forgalmazo == 'UIP':
osszeg+=int(film.bevel)
print("{:,}".format(osszeg), 'Ft')
def feladat5(filmek):
    print('Task 5: Most visitors in the first week:')
max_film = filmek[0]
for film in filmek:
if int(film.latogato) > int(max_film.latogato):
max_film = film
    print('\tOriginal title:', max_film.eredetiCim)
    print('\tHungarian title:', max_film.magyarCim)
    print('\tDistributor:', max_film.forgalmazo)
    print('\tWeek-1 revenue:', max_film.bevel, 'Ft')
    print('\tNumber of visitors:', max_film.latogato, 'people')
def tartalmazTeszt(eredetiCim, magyarCim):
eredetiTartalmaz=False
if 'W' in eredetiCim:
eredetiTartalmaz=True
if 'w' in eredetiCim:
eredetiTartalmaz=True
magyarTartalmazza=False
if 'W' in magyarCim:
magyarTartalmazza=True
if 'w' in magyarCim:
magyarTartalmazza=True
if eredetiTartalmaz and magyarTartalmazza:
return True
else:
return False
def feladat6(filmek):
    print('Task 6: ', end='')
n=len(filmek)
i=0
while (i<n and
not tartalmazTeszt(filmek[i].eredetiCim, filmek[i].magyarCim)):
i+=1
if i<n:
print("Ilyen film volt!")
else:
print("Ilyen film nem volt!")
def forgalmazoTeszt(forgalmazok, forgalmazo):
n=len(forgalmazok)
i=0
while i<n and forgalmazok[i].nev != forgalmazo:
i+=1
if i<n:
return True
else:
return False
def feladat7(filmek):
mezonevek = 'forgalmazo;filmekSzama\n'
forgalmazok = []
for film in filmek:
if not forgalmazoTeszt(forgalmazok, film.forgalmazo):
forgalmazo = Forgalmazo(film.forgalmazo)
forgalmazok.append(forgalmazo)
else:
n=len(forgalmazok)
for i in range(0, n):
if forgalmazok[i].nev == film.forgalmazo:
forgalmazok[i].filmek += 1
fp = open('stat.csv', 'w', encoding='utf-8')
fp.write(mezonevek)
for forgalmazo in forgalmazok:
if forgalmazo.filmek>1:
fp.write(forgalmazo.nev + ';' + str(forgalmazo.filmek) + '\n')
fp.close()
def feladat8(filmek):
    print('Task 8: The longest period between two ', end='')
    print('InterCom premieres: ', end='')
elsoBemutato = None
max_kul = 0
for film in filmek:
if film.forgalmazo == 'InterCom':
isoDatum = film.bemutato.replace('.', '-')
            if elsoBemutato is None:
elsoBemutato=datetime.date.fromisoformat(isoDatum)
else:
kovBemutato = datetime.date.fromisoformat(isoDatum)
kul = kovBemutato - elsoBemutato
if kul.total_seconds() > max_kul:
max_kul=kul.total_seconds()
elsoBemutato = kovBemutato
nap = max_kul // (24 * 3600)
    print(int(nap), 'days')
filmek = fajl_beolvas()
# ~ feladat3(filmek)
# ~ feladat4(filmek)
# ~ feladat5(filmek)
# ~ feladat6(filmek)
# ~ feladat7(filmek)
feladat8(filmek)
|
janos01/esti2020Python
|
gyakorlo/Nyito/src/OpeningWeekend.py
|
OpeningWeekend.py
|
py
| 3,940 |
python
|
hu
|
code
| 0 |
github-code
|
6
|
38416706519
|
import numpy as np
class TrinomialModel(object): # Here we start defining our 'class' --> Trinomial Model!
# First, a method to initialize our `TrinomialModel` algorithm!
def __init__(self, S0, r, sigma, mat):
self.__s0 = S0
self.__r = r
self.__sigma = sigma
self.__T = mat
# Second, we build a method (function) to compute the risk-neutral probabilities!
def __compute_probs(self):
self.__pu = (
(
np.exp(self.__r * self.__h / 2)
- np.exp(-self.__sigma * np.sqrt(self.__h / 2))
)
/ (
np.exp(self.__sigma * np.sqrt(self.__h / 2))
- np.exp(-self.__sigma * np.sqrt(self.__h / 2))
)
) ** 2
self.__pd = (
(
-np.exp(self.__r * self.__h / 2)
+ np.exp(self.__sigma * np.sqrt(self.__h / 2))
)
/ (
np.exp(self.__sigma * np.sqrt(self.__h / 2))
- np.exp(-self.__sigma * np.sqrt(self.__h / 2))
)
) ** 2
self.__pm = 1 - self.__pu - self.__pd
assert 0 <= self.__pu <= 1.0, "p_u should lie in [0, 1] given %s" % self.__pu
assert 0 <= self.__pd <= 1.0, "p_d should lie in [0, 1] given %s" % self.__pd
assert 0 <= self.__pm <= 1.0, "p_m should lie in [0, 1] given %s" % self.__pm
# Third, this method checks whether the given parameters are alright and that we have a 'recombining tree'!
def __check_up_value(self, up):
if up is None:
up = np.exp(self.__sigma * np.sqrt(2 * self.__h))
assert up > 0.0, "up should be non negative"
down = 1 / up
assert down < up, "up <= 1. / up = down"
self.__up = up
self.__down = down
    # Fourth, we use this method to compute the underlying stock price path
def __gen_stock_vec(self, nb):
vec_u = self.__up * np.ones(nb)
np.cumprod(vec_u, out=vec_u)
vec_d = self.__down * np.ones(nb)
np.cumprod(vec_d, out=vec_d)
res = np.concatenate((vec_d[::-1], [1.0], vec_u))
res *= self.__s0
return res
# Fifth, we declare a Payoff method to be completed afterwards depending on the instrument we are pricing!
def payoff(self, stock_vec):
raise NotImplementedError()
# Sixth, compute current prices!
def compute_current_price(self, crt_vec_stock, nxt_vec_prices):
expectation = np.zeros(crt_vec_stock.size)
for i in range(expectation.size):
tmp = nxt_vec_prices[i] * self.__pd
tmp += nxt_vec_prices[i + 1] * self.__pm
tmp += nxt_vec_prices[i + 2] * self.__pu
expectation[i] = tmp
return self.__discount * expectation
# Seventh, Option pricing!
def price(self, nb_steps, up=None):
        assert nb_steps > 0, "nb_steps should be > 0"
nb_steps = int(nb_steps)
self.__h = self.__T / nb_steps
self.__check_up_value(up)
self.__compute_probs()
self.__discount = np.exp(-self.__r * self.__h)
final_vec_stock = self.__gen_stock_vec(nb_steps)
final_payoff = self.payoff(final_vec_stock)
nxt_vec_prices = final_payoff
for i in range(1, nb_steps + 1):
vec_stock = self.__gen_stock_vec(nb_steps - i)
nxt_vec_prices = self.compute_current_price(vec_stock, nxt_vec_prices)
return nxt_vec_prices[0]
class TrinomialCall(TrinomialModel):
def __init__(self, S0, r, sigma, mat, K):
super(TrinomialCall, self).__init__(S0, r, sigma, mat)
self.__K = K
def payoff(self, s):
return np.maximum(s - self.__K, 0.0)
class TrinomialAmerican_C(TrinomialCall):
def __init__(self, S0, r, sigma, mat, K):
super(TrinomialAmerican_C, self).__init__(S0, r, sigma, mat, K)
def compute_current_price(self, crt_vec_stock, nxt_vec_prices):
expectation = TrinomialCall.compute_current_price(
self, crt_vec_stock, nxt_vec_prices
)
return np.maximum(expectation, TrinomialCall.payoff(self, crt_vec_stock))
class TrinomialPut(TrinomialModel):
def __init__(self, S0, r, sigma, mat, K):
super(TrinomialPut, self).__init__(S0, r, sigma, mat)
self.__K = K
def payoff(self, s):
return np.maximum(self.__K - s, 0.0)
class TrinomialAmerican_P(TrinomialPut):
def __init__(self, S0, r, sigma, mat, K):
super(TrinomialAmerican_P, self).__init__(S0, r, sigma, mat, K)
def compute_current_price(self, crt_vec_stock, nxt_vec_prices):
expectation = TrinomialPut.compute_current_price(
self, crt_vec_stock, nxt_vec_prices
)
return np.maximum(expectation, TrinomialPut.payoff(self, crt_vec_stock))
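
# --- Illustrative usage sketch (added; the parameter values are made up). With the
# --- same inputs, the American put should price at or above the European put.
if __name__ == "__main__":
    euro_put = TrinomialPut(S0=100.0, r=0.05, sigma=0.2, mat=1.0, K=100.0)
    amer_put = TrinomialAmerican_P(S0=100.0, r=0.05, sigma=0.2, mat=1.0, K=100.0)
    print(euro_put.price(nb_steps=200), amer_put.price(nb_steps=200))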
|
piper-of-dawn/DE-GWP1
|
trinomial_pricing.py
|
trinomial_pricing.py
|
py
| 4,817 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73591272829
|
# def funct1(nums):
# """ๆฑไป1ๅ ๅฐnums็ๅ"""
# sum1 = 0
# for num in range(1, nums+1): # ไฝฟ็จๅพช็ฏ้ๅ1ๅฐnums
# sum1 += num
# print(sum1)
#
# funct1(100)
#
# def max1(a, *numbers):
# """ๆฑๅคไธชๆฐไธญ็ๆๅคงๅผ"""
# num1 = a # ๅฎไน็ฌฌไธไธชๆฐ
# for num in numbers:
# if num > num1: # ๅฐaๅปๆฏ่พๅ้ข็ๆฐ
# num1 = num
# print(num1)
#
# max1(12, 34, 545, 43, 435, 6)
#
# def shai_zi(N):
# """ๅฎ็ฐๆ่ฒๅญๅ่ฝ๏ผๅนถๆฑๆๆ่ฒๅญ็็นๆฐๅ"""
# import random # ๅฏผๅ
ฅ้ๆบๆฐๆจกๅ
# sum2 = 0
# for x in range(N):
# num = random.randint(1, 6)
# print(num) # ๆๅฐๆฏไธช่ฒๅญ็็นๆฐ
# sum2 += num
# print(sum2)
#
#
# shai_zi(3)
#
# def change_dict(dict1):
# """ไบคๆขๅญๅ
ธไธญ้ฎๅๅผ"""
# dict2 = {}
# for x in dict1:
# value = dict1[x] # ๆๅ่พๅ
ฅๅญๅ
ธ็ๅผ
# dict2[value] = x
# print(dict2)
#
# change_dict({'a': 1, 'b': 2})
# def max1(num1, num2, num3):
# """ๆฏ่พไธไธชๆฐ็ๅคงๅฐ๏ผๅนถๆฑๅบๆๅคง็ๅผ"""
# max_num = num1 # ๅๅฎ็ฌฌไธไธชๆฐๆๅคง
# if max_num < num2:
# max_num = num2
# if max_num < num3:
# max_num = num3
#
# print(max_num)
#
# max1(52, 24, 4)
#
# def print_letter(string):
# """ๆๅๅญ็ฌฆไธฒ็ๅญๆฏ๏ผๅนถๆผๆฅๆๅฐ"""
# str1 = '' # ๅฝๅไธไธช็ฉบ็ๅญ็ฌฆไธฒ
# for x in string:
# if x.isalpha() == True:
# str1 += x # ๆผๆฅๅญ็ฌฆไธฒ
#
# print(str1)
#
# print_letter('d2eweds@!we')
# def average_value(*number):
# """ๆฑๅนณๅๆฐ"""
# many = len(number) # ็จไบ่ฎกๆฐ
# sum1 = sum(number) # ๆฑๅ๏ผ็ปๆไธบstrๆฐๆฎ็ฑปๅ
# print('ๅนณๅๆฐไธบ%.3f ' % float(sum1/many))
#
# average_value(1, 3, 5, 7, 9, 10, 11)
# def factorial(nums=10):
# """่ฎก็ฎไธไธชๆฐ็้ถไน"""
# sum1 = 1 # ็จไบ่ฃ
้ถไน็็ปๆ
# for nums in range(1, nums+1):
# sum1 *= nums
#
# print(sum1)
#
# factorial(5)
def operation(symbol, *numbers):
"""ๅคๅ่ฝ่ฎก็ฎๅฝๆฐ"""
list1 = list(numbers)
if symbol == '+': # ๅคๆญ่ฟ็ฎ็ฌฆๅทๆฏๅฆๆฏๅ ๅท
sum1 = sum(list1)
if symbol == '-': # ๅคๆญ่ฟ็ฎ็ฌฆๅทๆฏๅฆๆฏๅๅท
sum1 = list1.pop(0)
for num in list1:
sum1 -= num
if symbol == '*': # ๅคๆญ่ฟ็ฎ็ฌฆๅทๆฏๅฆๆฏไนๅท
sum1 = 1
for num in list1:
sum1 *= num
if symbol == '/': # ๅคๆญ่ฟ็ฎ็ฌฆๅทๆฏๅฆๆฏ้คๅท
sum1 = list1.pop(0)
for num in list1:
sum1 /= num
print('่ฟ็ฎ็ฌฆๅทไธบ%s,็ปๆไธบ%.3f' % (symbol, float(sum1)))
operation('/', 1000, 5, 18, 20)
|
gilgameshzzz/learn
|
day7Python็ฎก็็ณป็ป/ไฝไธ.py
|
ไฝไธ.py
|
py
| 2,719 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35834788029
|
# https://codility.com/demo/results/training89TSCH-AEW/
def solution(X, A):
# write your code in Python 2.7
N=len(A)
lvs = dict.fromkeys(xrange(1,X+1),0)
cnt=0
for i in xrange(N):
x=A[i]
        if x > X:  # leaves only fall on positions 1..X (the original tested N+1)
            continue
if lvs[x]==0:
lvs[x]=1
cnt+=1
if cnt==X:
return i
return -1
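
# Worked example (added): with X = 5 and A = [1, 3, 1, 4, 2, 3, 5, 4], positions
# 1..5 are all covered once A[6] = 5 falls, so solution(5, A) returns 6.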
|
peterkisfaludi/Codility
|
02-Counting-elements/frogriverone.py
|
frogriverone.py
|
py
| 412 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40572545154
|
import os
import csv
candidate_dict = {}
candidate_name = []
csvpath = os.path.join('Resources', 'election_data.csv')
text = os.path.join('analysis', "Output.txt")
print("Election Results")
# keep the output file open for all the writes below (the original opened it in a
# `with` block that closed it immediately, so later writes hit a closed file)
file = open(text, "w+")
file.write("Election Results\n")
with open(csvpath) as csvfile:
# CSV reader specifies delimiter and variable that holds contents
csvreader = csv.reader(csvfile, delimiter=',')
csv_header = next(csvreader)
#print(f"CSV Header: {csv_header}")
for row in csvreader:
candidate_name = row[2]
if candidate_name in candidate_dict:
candidate_dict[candidate_name] += 1
else:
candidate_dict[candidate_name] = 1
# print(csvreader)
total_votes = sum(candidate_dict.values())
# Read the header row first (skip this step if there is now header)
#csv_header = next(csvreader)
print("---------------------")
file.write("---------------------")
print(f"Total Votes: {total_votes}")
file.write(f"Total Votes: {total_votes}")
print("---------------------")
file.write("---------------------")
winner = ''
winning_total = 0
for key in candidate_dict:
if winning_total < candidate_dict[key]:
winner = key
winning_total = candidate_dict[key]
vote_percent = float(candidate_dict[key]) / float(total_votes) * 100
#print(key)
#print(candidate_dict[key])
print(f'{key} = {vote_percent:.3f}% ({candidate_dict[key]})')
    file.write(f'{key} = {vote_percent:.3f}% ({candidate_dict[key]})\n')
print("---------------------")
file.write("---------------------")
#print(f'{candidate_dict[candidate_name]}')
print(f"Winner: {winner}")
file.write(f"Winner: {winner}")
#print(candidate_name[])
# text = float(text)
# with open('Output.txt', 'w') as file:
# file.write("Election Results")
# file.write("---------------------")
# file.write(f"Total Votes: {total_votes}")
# file.write("---------------------")
# file.write(f'{key} = {vote_percent:.3f}% ({candidate_dict[key]})')
# file.write("---------------------")
# file.write(f"Winner: {winner}")
|
David-Lucey/python-challenge
|
PyPoll/main.py
|
main.py
|
py
| 2,268 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24584540912
|
# Use the file name mbox-short.txt as the file name
fname = input("Enter file name: ")
try:
fh = open(fname)
except:
if fname == "na na boo boo":
print ("NA NA BOO BOO TO YOU - You have been punk'd!")
quit()
else:
print("file doesn't exist" ,fname)
quit()
total = 0
count = 0
for line in fh:
if not line.startswith("X-DSPAM-Confidence:"):
continue
position = line.find(':')
number = float(line[position+1:])
count = count + 1
total = total + number
average = total/count
print("Average spam confidence:", average)
|
cruzandfamily/Exercise-7
|
7_3.py
|
7_3.py
|
py
| 614 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32043413373
|
import getpass
import datetime
import urllib, urllib.request
import os, sys
from random import randint
from shutil import copyfileobj
from html.parser import HTMLParser
#Time, Right Now.
now = datetime.datetime.now()
#Get local username
UserName = getpass.getuser()
#Define End-Of-Script Quit-Action
def quitting_time():
print()
print()
print("Your Precious Cargo Is On Your Desktop, Inside The Folder 'ThisAmericanLife'.")
print("I Now Retire To My Humble Abode.")
print("Thank You, User, For This Opportunity.")
input("Press ENTER To End Program.")
sys.exit(0)
#Change download directory to "ThisAmericanLife" on User's Desktop
if not os.access('/home/' + UserName + '/Desktop/ThisAmericanLife/', os.F_OK):
os.mkdir('/home/' + UserName + '/Desktop/ThisAmericanLife/')
os.chdir('/home/'+ UserName +'/Desktop/ThisAmericanLife/')
#Required for parsing and stripping HTML data
class MLStripper(HTMLParser): #Supports the stripping of tags from "straight html"
def __init__(self):
super().__init__()
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
#This strips html tags from html; input must be "straight html"
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
#Define asking User what s/he wants to do
def what_to_do (retries=4, complaint='Answer With A Number Between 1 and 5, Please'):
Option_01 = set(['1', '01', 'one'])
Option_02 = set(['2','02','two'])
Option_03 = set(['3','03','three'])
Option_04 = set(['4','04','four'])
Option_05 = set(['5','05','five'])
while True:
we_are_doing = input("Answer With Numbers 1 thru 5 >> ").lower()
if we_are_doing in Option_01:
print("We'll now download one episode of your choice.")
print()
One_OneEpisode()
if we_are_doing in Option_02:
print("We'll now download your choice of a block of episodes.")
print()
Two_EpisodeBlock()
if we_are_doing in Option_03:
print("We'll now download episodes from your choice to the current episode.")
print()
Three_ScatteredEpisodes()
if we_are_doing in Option_04:
print("We'll now download a smattering of episodes of your choice.")
print()
Four_EpiChoiceToCurrent()
if we_are_doing in Option_05:
print("We'll now download five random episodes for you.")
print()
Five_5RandomEpis()
retries = retries - 1
if retries < 0:
print("You Are Incapable Of Following Instructions.")
print("I'm Done Trying To Help You.")
input("Press ENTER To Quit, As I Have.")
sys.exit(0)
print(complaint)
####### Subroutine: Get Latest Episode Number #######
def get_latest_episode_number():
#Note: this subroutine creates and deletes a temporary txt file, "TAL_Archive_HTML.txt"
###Global Variables###
#this is required because we need to know what the latest episode number is Everywhere
global LatestEpisodeNumber
###Global Variables###
#Get the HTML source code from T.A.L.'s Archive URL
website = urllib.request.urlopen("http://www.thisamericanlife.org/radio-archives").read()
    #Save the page to TAL_Archive_HTML.txt [file will be removed when the data is retrieved]
strip_write = open('/home/' + UserName + '/Desktop/ThisAmericanLife/TAL_Archive_HTML.txt', 'w')
strip_write.write(strip_tags(website.decode('utf-8')))
strip_write.close()
#Search through HTML-stripped source data for the latest episode, keying off the first instance of "Share"
with open('/home/' + UserName + '/Desktop/ThisAmericanLife/TAL_Archive_HTML.txt', 'r+') as TAL_Archive_HTML:
for line in TAL_Archive_HTML:
if "Share" in line:
#Assign the latest episode's information [number, title, date] without leading spaces to variable CurrentEpisodeLineInfo
CurrentEpisodeLineInfo = TAL_Archive_HTML.__next__().lstrip()
#Assign the last 10 characters of space-stripped CurrentEpisodeLineInfo to LatestEpisodeDate [DD.MM.YYYY]
LatestEpisodeDate = CurrentEpisodeLineInfo[-11:-1]
                #Check if the latest queued episode is available today, keying off date information
                #Compare real dates rather than "%m.%d.%Y" strings (lexicographic
                #string comparison breaks across year boundaries)
                LatestEpisodeDateParsed = datetime.datetime.strptime(LatestEpisodeDate, "%m.%d.%Y").date()
                if now.date() < LatestEpisodeDateParsed:
                    LatestEpisodeNumber = int(CurrentEpisodeLineInfo[0:3]) - 1
                elif now.date() == LatestEpisodeDateParsed:
                    #We need to find out when today's episode is released for download... today, or tomorrow.
                    LatestEpisodeNumber = int(CurrentEpisodeLineInfo[0:3]) - 1
                elif now.date() > LatestEpisodeDateParsed:
                    LatestEpisodeNumber = int(CurrentEpisodeLineInfo[0:3])
else:
print()
print()
print("Call The Doctor.")
print("I Now Hide Behind Cpt. Jack Harkness For Safety.")
input("Press ENTER To Escape Your Doom.")
sys.exit(0)
break
print("The latest episode in the queue is " + CurrentEpisodeLineInfo)
print()
print("The latest available episode is Episode #" + str(LatestEpisodeNumber))
print()
#Remove the temporary txt file TAL_Archive_HTML.txt
os.remove("/home/" + UserName + "/Desktop/ThisAmericanLife/TAL_Archive_HTML.txt")
####### Subroutine: Get Episode Number From User #######
def get_episode_number_from_user():
###Global Variables###
#This is required because we need to know the episode number Everywhere
global EpisodeNumber
#This is required in case User enters "0" for the episode number
global nakednumber
###Global Variables###
number = input("Give Me An Episode Number >> ")
print()
nakednumber = number.lstrip("0")
if nakednumber == "":
print("You told me to download 'Episode 0', which does not exist.")
get_episode_number_from_user()
else:
pass
EpisodeNumber = int(nakednumber)
if(EpisodeNumber >= 1):
pass
else:
print("You didn't give me a positive whole number.")
get_episode_number_from_user()
####### Subroutine: Generate Random Episode Number #######
def generate_random_episode_number():
###Global Variables###
#This is required because we need to know the episode number Everywhere
global EpisodeNumber
###Global Variables###
    RandomEpisodeNumber = randint(1, int(LatestEpisodeNumber))  # episodes start at 1, not 0
EpisodeNumber = RandomEpisodeNumber
####### Subroutine: Check Number Is Valid #######
def check_epi_number_validity():
if EpisodeNumber >= int(LatestEpisodeNumber):
while (EpisodeNumber >= int(LatestEpisodeNumber)):
print("The episode number you have chosen is in the Future.")
print()
get_episode_number_from_user()
else:
pass
####### Subroutine: Download The Episode #######
def download_the_episode():
###Global Variables###
#this is required because we need to know the episode number Everywhere
global EpisodeNumber
###Global Variables###
mp3 = str(EpisodeNumber) + ".mp3"
with urllib.request.urlopen(("http://audio.thisamericanlife.org/" + str(EpisodeNumber) + "/" + str(EpisodeNumber) + ".mp3")) as in_stream, open(mp3, 'wb') as out_file:
copyfileobj(in_stream, out_file)
print()
print("I have finished downloading episode #" + str(EpisodeNumber) + " of This American Life.")
print()
##################################################
##########Executing The User's Options############
##################################################
####### Download One Episode #######
def One_OneEpisode():
get_latest_episode_number()
get_episode_number_from_user()
check_epi_number_validity()
download_the_episode()
quitting_time()
####### Download A Choice Block Of Episodes #######
def Two_EpisodeBlock():
#This is required because we need to know the episode number Everywhere
global EpisodeNumber
###Global Variables###
get_latest_episode_number()
    #Get the first border episode number from User
print("What episode number is at the beginning of this block of episodes?")
get_episode_number_from_user()
check_epi_number_validity()
FirstNumber = EpisodeNumber
    #Get the second border episode number from User
print("What episode number is at the end of this block of episodes?")
get_episode_number_from_user()
check_epi_number_validity()
SecondNumber = EpisodeNumber
    #A list of the border episode numbers
boarder_episodes = [FirstNumber,SecondNumber]
#Find and establish which episode number inputted has the larger value
HigherEpisodeNumber = max(boarder_episodes)
#Find and establish which episode number inputted has the smaller value
LowerEpisodeNumber = min(boarder_episodes)
#Asshole Condition [block of 1 episode]
#We are going to use EpisodeNumber to download, and admonish the User
if FirstNumber == SecondNumber:
print()
print("You should have chosen Option #1: 'Download One Episode of your choice'.")
print("I don't want to out of principle, but to be nice I shall help you anyway.")
print()
download_the_episode()
quitting_time()
else:
pass
#Calculate how many episodes to download
DownloadCycles = int(HigherEpisodeNumber) - int(LowerEpisodeNumber) + 1
#Prime the EpisodeNumber variable for looping
EpisodeNumber = int(LowerEpisodeNumber)
#Download those episodes!
for n in range(0,DownloadCycles):
download_the_episode()
EpisodeNumber = EpisodeNumber + 1
quitting_time()
####### Download Scattered Episodes #######
def Three_ScatteredEpisodes():
get_latest_episode_number()
HowManyEpisodes = input("How Many Episodes Would You Like To Download? >> ")
if(int(HowManyEpisodes) >= 1):
pass
else:
while (int(HowManyEpisodes) < 1):
print()
print("You didn't give me a counting number.")
HowManyEpisodes = input("How Many Episodes Would You Like To Download? >> ")
print()
if int(HowManyEpisodes) > (int(LatestEpisodeNumber) + 1):
while (int(HowManyEpisodes) > (int(LatestEpisodeNumber) + 1)):
print()
print("There are not that many episodes to download at this time.")
print()
print("There are " + str(LatestEpisodeNumber) + " available to download at this time.")
print()
HowManyEpisodes = input("How Many Episodes Would You Like To Download? >> ")
print()
for n in range(0,int(HowManyEpisodes)):
get_episode_number_from_user()
check_epi_number_validity()
download_the_episode()
quitting_time()
####### Download Choice To Latest Available #######
def Four_EpiChoiceToCurrent():
###Global Variables###
#This is required because we need to know the episode number Everywhere
global EpisodeNumber
###Global Variables###
get_latest_episode_number()
print("I need to know what episode you want to start with.")
get_episode_number_from_user()
check_epi_number_validity()
#Calculate how many episodes to download
DownloadCycles = int(LatestEpisodeNumber) - EpisodeNumber + 1
#Download those episodes!
for n in range(0,DownloadCycles):
download_the_episode()
EpisodeNumber = EpisodeNumber +1
quitting_time()
####### Download Five Random Episodes #######
def Five_5RandomEpis():
###Global Variables###
#This is required because we need to know the episode number Everywhere
global EpisodeNumber
###Global Variables###
get_latest_episode_number()
for n in range(0,5):
EpisodeNumber = randint(1,int(LatestEpisodeNumber))
download_the_episode()
quitting_time()
##################################################
############### Kick Off The Script ##############
##################################################
#Print-to-Screen Introduction
print('========== =========== ==========')
print("Hello " + UserName + "!")
print("The time is", now.strftime("%Y-%m-%d %H:%M")) #Print to Terminal time, time right now;
print("Let's Download Some Episodes of 'This American Life'.")
print('========== =========== ==========')
#Prompt User On What To Do
print("What Type Of Downloading Would We Like To Do? ")
print()
print("Option 1: One [1] episode of your choice.")
print("Option 2: A continuous block of episodes, your choice.")
print("Option 3: A discontinuous block of episodes, your choice.")
print("Option 4: All episodes between your choice and the current episode [inclusive].")
print("Option 5: Five [5] random episodes.")
print()
what_to_do()
#EndFile.
|
milesnielsen/DownloadEpisodesTAL
|
TAL_Epi_Download.py
|
TAL_Epi_Download.py
|
py
| 13,624 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4657636922
|
import random
print('Welcome to the number guessing game')
def is_valid(number, gran):
    if gran.isdigit() and number.isdigit():
        return int(number) in range(0, int(gran) + 1)
def get_new_rand(gran):
    return random.randint(0, int(gran))
n, count = input('Enter the upper bound of the interval: '), 0
num, flag = get_new_rand(n), True
while flag:
    chislo = input(f'Enter a number from 1 to {n}: ')
    if is_valid(chislo, n):
        chislo = int(chislo)
        if chislo < num:
            print('Your number is smaller than the hidden one, try again')
            count += 1
        if chislo > num:
            print('Your number is bigger than the hidden one, try again')
            count += 1
        if chislo == num:
            print(f'You guessed it, congratulations!\nNumber of attempts it took you: {count}')
            ans = input('Shall we play another round? (yes/no) ')
            while True:
                if ans == 'yes':
                    # read the new bound before drawing the number: the original
                    # one-line tuple assignment called get_new_rand(n) with the old n
                    n = input('Enter the new upper bound of the interval: ')
                    count = 0
                    num = get_new_rand(n)
                    break
                if ans == 'no':
                    flag = False
                    break
                else:
                    ans = input("Didn't get that -- yes or no? ")
    else:
        print('How about entering a whole number instead?')
print('Thanks for playing the number guessing game. See you...')
|
WeideR66/littlepythonprojects
|
ugadaika.py
|
ugadaika.py
|
py
| 1,741 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
39290687517
|
#!/usr/bin/env python2
from __future__ import print_function
from Bio import SeqIO
import sys, vcf, getopt
__author__ = 'Kumar'
sample_number = int(0)
vcf_file = ''
a = int(0)
x = int(0)
n = int(0)
position = int(0)
fold = int()
try:
myopts, args = getopt.getopt(sys.argv[1:],"f:s:")
for o, a in myopts:
if o == '-f':
vcf_file = str(a)
elif o == '-s':
sample_number = int(a)
except getopt.GetoptError as e:
print(str(e))
print("Usage:: %s -f <vcf_file> -s <sample index in case of multi-sample vcf file>" % sys.argv[0])
sys.exit(2)
vcf_reader = vcf.Reader(open(vcf_file, 'r'))
sf = open("outfile.sf", "w")
for record in vcf_reader:
#print(record.samples)
position = record.POS
ad = record.samples[sample_number]['AD']
#print(ad)
    if ad is None:
continue
else:
a = ad[0]
x = ad[1]
#print("%s::::%s"% (a,x))
n = a + x
if a > x:
fold = 0
else:
fold = 1
header = "location\tx\tn\tfolded\n"
if sf.tell() == 0:
sf.write(header)
sf.write("%d\t%d\t%d\t%d\n"% (position, x, n, fold))
else:
sf.write("%d\t%d\t%d\t%d\n"% (position, x, n, fold))
sf.close()
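
# Output format (added note): outfile.sf is a tab-separated site-frequency table,
# one row per usable record -- columns are location, x (alt allele depth),
# n (total depth) and folded (1 when ref depth <= alt depth), e.g. "1042\t3\t17\t1".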
|
kumarsaurabh20/NGShelper
|
PopulationGenomics/vcf2sf.py
|
vcf2sf.py
|
py
| 1,109 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35510723799
|
# Experiment 24 - Tile Movement
#
# By Chris Herborth (https://github.com/Taffer)
# MIT license, see LICENSE.md for details.
import base64
import os
import pygame
import pygame.freetype
import pygame.gfxdraw
import struct
import sys
import time
import zlib
from xml.etree import ElementTree
SCREEN_TITLE = 'Experiment 24 - Tile Movement'
SCREEN_WIDTH = 1280 # 720p screen
SCREEN_HEIGHT = 720
BLACK = pygame.Color('black')
RED = pygame.Color('red')
WHITE = pygame.Color('white')
# Tiled map parser.
class Map:
def __init__(self, map_path: str) -> None:
tree = ElementTree.parse(map_path)
self.root = tree.getroot()
layers = self.root.findall('layer')
# Map size in tiles.
self.map_width = int(self.root.attrib['width'])
self.map_height = int(self.root.attrib['height'])
# Tile size in pixels.
self.tile_width = int(self.root.attrib['tilewidth'])
self.tile_height = int(self.root.attrib['tileheight'])
# Tileset and image atlas paths are relative to the map file.
prefix = os.path.split(map_path)[0]
tilesets = self.root.findall('tileset')
self.tiles = [None] # Index 0 means "don't draw a tile" in Tiled.
for tileset in tilesets:
tileset_path = os.path.join(prefix, tileset.attrib['source'])
tileset_prefix = os.path.split(tileset_path)[0]
tileset_tree = ElementTree.parse(tileset_path)
tileset_root = tileset_tree.getroot()
image = tileset_root.find('image')
image_path = os.path.join(tileset_prefix, image.attrib['source'])
texture = pygame.image.load(image_path).convert_alpha()
texture_rect = texture.get_rect()
# Create subsurfaces for the tiles in the atlas.
for y in range(texture_rect.height // self.tile_height):
for x in range(texture_rect.width // self.tile_width):
tile_rect = pygame.Rect(x * self.tile_width, y * self.tile_height, self.tile_width, self.tile_height)
self.tiles.append(texture.subsurface(tile_rect))
self.layer_data = {}
for layer in layers:
# Decode the layer data. This map is using CSV, which is easy; for
# help decoding other formats, check out my tileset crusher's code:
# https://github.com/Taffer/crushtileset/
data = layer.find('data')
data_contents = data.text
this_data = []
if data.attrib['encoding'] == 'csv':
lines = data_contents.split()
for line in lines:
for c in line.split(','):
if c != '':
this_data.append(int(c))
elif data.attrib['encoding'] == 'base64' and data.attrib.get('compression', 'none') == 'zlib':
the_data = base64.b64decode(data_contents)
# CSV data is organized into rows, so we make this one big row.
this_data = [x[0] for x in struct.iter_unpack('<I', zlib.decompress(the_data))]
else:
raise RuntimeError('Unsupported encoding/compression.')
self.layer_data[layer.attrib['name']] = this_data
def render(self, layer: str, surface: pygame.Surface, viewport: pygame.Rect, offset_x: int, offset_y: int) -> None:
# This use case seems to be faster than using blits(); the overhead of
# creating a list of tuples is probably what kills it.
max_x = min(viewport.width, self.map_width)
max_y = min(viewport.height, self.map_height)
for y in range(max_y):
for x in range(max_x):
tile = self.tiles[self.layer_data[layer][self.get_index(x + viewport.x, y + viewport.y)]]
target = pygame.Rect(offset_x + x * self.tile_width, offset_y + y * self.tile_height,
self.tile_width, self.tile_height)
if tile is not None:
surface.blit(tile, target)
def get_index(self, x: int, y: int) -> int:
return x + y * self.map_width
def get_tile(self, layer: str, x: int, y: int) -> int:
return self.layer_data[layer][self.get_index(x, y)]
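
# --- Illustrative sketch (added; 'assets/level1.tmx' and the 'Ground' layer name are
# --- hypothetical, not from this repo). Drawing a 20x12-tile viewport at the origin:
#
#   game_map = Map('assets/level1.tmx')
#   game_map.render('Ground', screen, pygame.Rect(0, 0, 20, 12), 0, 0)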
# LPC Sprite for animation.
#
# This sets up a set of sprites, quads, etc. using the standard Liberated
# Pixel Cup sprite format:
#
# https://lpc.opengameart.org/static/lpc-style-guide/styleguide.html
#
# Specifically:
# * Each row is a complete animation cycle.
# * Rows are mostly in groups of four based on facing = away, left, forward,
# right.
# * Animation rows are: Spellcast, Thrust, Walk, Slash, Shoot, Hurt (only one
# facing for Hurt). We fake an Idle animation by cloning the first frame of
# Walk.
# * Are 64x64 on the sprite sheet.
# Note that this includes a non-standard animation, 'idle', made up of the
# first 'walk' frame.
LPC_ANIMATION = [
'spellcast',
'thrust',
'walk',
'slash',
'shoot',
'hurt',
'idle'
]
LPC_FACING = [
'away',
'left',
'forward',
'right'
]
FRAMES = {
LPC_ANIMATION[0]: 7, # spellcast
LPC_ANIMATION[1]: 8, # thrust
LPC_ANIMATION[2]: 9, # walk
LPC_ANIMATION[3]: 6, # slash
LPC_ANIMATION[4]: 13, # shoot
LPC_ANIMATION[5]: 6, # hurt
LPC_ANIMATION[6]: 1, # idle
}
class LPCSprite:
def __init__(self: 'LPCSprite', texture: pygame.Surface) -> None:
self.width = 64
self.height = 64
self.feet_x = self.width // 2 # Where are the feet relative to 0,0?
self.feet_y = self.height - 2
self.facing = LPC_FACING[2] # Default facing and animation.
self.animation = LPC_ANIMATION[2]
self.frame = 1
self.texture = texture
# Generate subsurfaces.
self.frames = {}
y = 0
for av in LPC_ANIMATION[:-2]: # "hurt" and "idle" are special cases
self.frames[av] = {}
for fv in LPC_FACING:
self.frames[av][fv] = []
for i in range(FRAMES[av]):
x = i * self.width
rect = pygame.Rect(x, y, self.width, self.height)
self.frames[av][fv].append(texture.subsurface(rect))
y += self.height
# "hurt" has to be special-cased because it only has one facing.
self.frames['hurt'] = {}
y = texture.get_height() - self.height
        # initialize all four facing lists first; the original nested these loops
        # and re-bound fv, appending into lists that did not exist yet
        for fv in LPC_FACING:
            # We'll use this animation for all four facings.
            self.frames['hurt'][fv] = []
        for i in range(FRAMES['hurt']):
            x = i * self.width
            rect = pygame.Rect(x, y, self.width, self.height)
            for fv in LPC_FACING:
                self.frames['hurt'][fv].append(texture.subsurface(rect))
# "idle" is fake, just the first frame from "walk"
self.frames['idle'] = {}
for fv in LPC_FACING:
self.frames['idle'][fv] = [self.frames['walk'][fv][0]]
def check_frame(self: 'LPCSprite') -> None:
if self.frame >= FRAMES[self.animation]:
self.frame = 0
def next_frame(self: 'LPCSprite') -> None:
self.frame += 1
self.check_frame()
def set_facing(self: 'LPCSprite', facing: str) -> None:
self.facing = facing
self.check_frame()
def set_animation(self: 'LPCSprite', animation: str) -> None:
self.animation = animation
self.check_frame()
def get_texture(self: 'LPCSprite') -> pygame.Surface:
return self.frames[self.animation][self.facing][self.frame]
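# Usage sketch (demo only): step an LPCSprite through one full walk cycle;
# `texture` is any LPC-format sprite sheet loaded via pygame.image.load().
def _lpc_sprite_demo(texture: pygame.Surface) -> int:
    sprite = LPCSprite(texture)
    sprite.set_animation('walk')
    sprite.set_facing('right')
    for _ in range(FRAMES['walk']):
        _frame = sprite.get_texture()  # blit this once per tick
        sprite.next_frame()
    return sprite.frame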
class StateMachine:
def __init__(self: 'StateMachine', initial_state: 'StateBase'):
self.current = initial_state
self.current.enter()
def change(self: 'StateMachine', new_state: 'StateBase'):
self.current.exit()
self.current = new_state
self.current.enter()
def update(self: 'StateMachine', dt: float):
next_state = self.current.update(dt)
if next_state != self.current:
self.change(next_state)
class StateBase:
def __init__(self: 'StateBase', entity: 'Entity'):
self.entity = entity
self.ticks = 0
def enter(self: 'StateBase'):
pass
def exit(self: 'StateBase'):
pass
def update(self: 'StateBase', dt: float):
pass
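# Illustrative sketch of the state-machine contract (toy states only): update()
# returns the next state (or self), and StateMachine handles exit()/enter().
class _Ping(StateBase):
    def update(self: '_Ping', dt: float):
        return _Pong(self.entity)
class _Pong(StateBase):
    def update(self: '_Pong', dt: float):
        return _Ping(self.entity)
def _fsm_demo() -> str:
    machine = StateMachine(_Ping(entity=None))
    machine.update(0.016)  # _Ping hands control to _Pong
    return type(machine.current).__name__  # '_Pong'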
class WaitState(StateBase):
def __init__(self: 'WaitState', entity: 'Entity'):
super().__init__(entity)
def enter(self: 'WaitState'):
self.entity.sprite.set_animation('idle')
def exit(self: 'WaitState'):
pass
def update(self: 'WaitState', dt: float):
walk = None
self.ticks += dt
if self.ticks > 0.1:
self.ticks -= 0.1
keystate = pygame.key.get_pressed()
if keystate[pygame.K_w] or keystate[pygame.K_UP]:
walk = {'x': 0, 'y': -1} # go up
elif keystate[pygame.K_s] or keystate[pygame.K_DOWN]:
walk = {'x': 0, 'y': 1} # go down
elif keystate[pygame.K_a] or keystate[pygame.K_LEFT]:
walk = {'x': -1, 'y': 0} # go left
elif keystate[pygame.K_d] or keystate[pygame.K_RIGHT]:
walk = {'x': 1, 'y': 0} # go right
if walk is not None:
return WalkState(self.entity, walk)
return self
class WalkState(StateBase):
def __init__(self: 'WalkState', entity: 'Entity', direction: dict):
super().__init__(entity)
self.direction = direction
self.target_x = self.entity.x
self.target_y = self.entity.y
def enter(self: 'WalkState'):
self.entity.sprite.set_animation('walk')
if self.direction['y'] == -1: # go up
self.entity.sprite.set_facing('away')
self.target_y -= 1
elif self.direction['y'] == 1: # go down
self.entity.sprite.set_facing('forward')
self.target_y += 1
elif self.direction['x'] == -1: # go left
self.entity.sprite.set_facing('left')
self.target_x -= 1
elif self.direction['x'] == 1: # go right
self.entity.sprite.set_facing('right')
self.target_x += 1
# Clamp movement to the map.
if self.target_x < 0:
self.target_x = 0
elif self.target_x >= self.entity.map.map_width:
self.target_x = self.entity.x
if self.target_y < 0:
self.target_y = 0
elif self.target_y >= self.entity.map.map_height:
self.target_y = self.entity.y
def exit(self: 'WalkState'):
pass
def update(self: 'WalkState', dt: float):
if self.target_x == self.entity.x and self.target_y == self.entity.y:
return WaitState(self.entity)
# TODO: needs tweening
self.ticks += dt
if self.ticks > 0.1:
if self.direction['y'] == -1: # go up
self.entity.offset_y -= 1
elif self.direction['y'] == 1: # go down
self.entity.offset_y += 1
elif self.direction['x'] == -1: # go left
self.entity.offset_x -= 1
elif self.direction['x'] == 1: # go right
self.entity.offset_x += 1
self.entity.sprite.next_frame()
if abs(self.entity.offset_x) >= self.entity.map.tile_width or \
abs(self.entity.offset_y) >= self.entity.map.tile_height: # Done moving.
self.entity.teleport(self.target_x, self.target_y)
return WaitState(self.entity)
return self
class Entity:
def __init__(self: 'Entity', sprite: LPCSprite, entity_map: Map):
self.sprite = sprite
self.map = entity_map
self.x = 0
self.y = 0
self.offset_x = 0 # Drawing offsets for inter-tile animation.
self.offset_y = 0
self.controller = StateMachine(WaitState(self))
def teleport(self: 'Entity', x: int, y: int):
self.x = x
self.y = y
self.offset_x = 0
self.offset_y = 0
def draw(self: 'Entity', surface: pygame.Surface, x: int, y: int):
# Draw sprite's feet at screen co-ords x, y.
rect = pygame.Rect(x - self.sprite.width // 4, y - self.sprite.height // 2, self.sprite.width, self.sprite.height)
rect.x += self.offset_x
rect.y += self.offset_y
surface.blit(self.sprite.get_texture(), rect)
def draw_tile(self: 'Entity', surface: pygame.Surface, x: int, y: int, tile_width: int, tile_height: int):
# Draw the tile the sprite thinks it's in.
rect = pygame.Rect(x * tile_width, y * tile_height, tile_width, tile_height)
pygame.gfxdraw.rectangle(surface, rect, RED)
class Demo:
def __init__(self: 'Demo', screen: pygame.Surface) -> None:
self.screen = screen
self.font = pygame.freetype.Font('resources/LiberationMono-Bold.ttf', 16)
self.sara_texture = pygame.image.load('resources/LPC_Sara/SaraFullSheet.png').convert_alpha()
self.map = Map('resources/grass-map.tmx')
# Viewport rect is in *tile* co-ordinates.
self.viewport = pygame.Rect(0, 0, 1280 // self.map.tile_width, 720 // self.map.tile_height)
self.sara = Entity(LPCSprite(self.sara_texture), self.map)
self.sara.teleport(10, 10) # Tile co-ordinates.
self.ticks = 0
def draw(self: 'Demo') -> None:
self.screen.fill(BLACK)
self.map.render('Tile Layer 1', self.screen, self.viewport, 0, 0)
self.font.render_to(self.screen, (10, 10), 'Use WASD or arrow keys to walk.', WHITE)
# Draw a rectangle to show which tile has the sprite's feet.
self.sara.draw_tile(self.screen, self.sara.x, self.sara.y, self.map.tile_width, self.map.tile_height)
# Draw Sara - We want her feet to be in the tile. This would be easier
# if the sprite were the same size as our map tiles...
self.sara.draw(self.screen, self.sara.x * self.map.tile_width, self.sara.y * self.map.tile_height)
def update(self: 'Demo', dt: float) -> None:
self.sara.controller.update(dt)
def main() -> None:
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption(SCREEN_TITLE)
demo = Demo(screen)
now = time.time()
dt = 0
playing = True
while playing:
demo.draw()
pygame.display.flip()
dt = time.time() - now
now = time.time()
demo.update(dt)
for event in pygame.event.get():
if event.type == pygame.QUIT:
playing = False
elif event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
playing = False
pygame.quit()
sys.exit()
if __name__ == '__main__':
main()
|
Taffer/pygame-experiments
|
24-tile-movement/main.py
|
main.py
|
py
| 14,875 |
python
|
en
|
code
| 2 |
github-code
|
6
|
7176111759
|
#!/usr/bin/env python3
import os
import sys
import re
from pathlib import Path
def _find_files(project_root):
path_exclude_pattern = r"\.git($|\/)|venv|_build|\.tox"
file_exclude_pattern = r"fill_template_vars\.py|\.swp$"
filepaths = []
for dir_path, _dir_names, file_names in os.walk(project_root):
if not re.search(path_exclude_pattern, dir_path):
for file in file_names:
if not re.search(file_exclude_pattern, file):
filepaths.append(str(Path(dir_path, file)))
return filepaths
def _replace(pattern, replacement, project_root):
print(f"Replacing values: {pattern}")
for file in _find_files(project_root):
try:
with open(file) as f:
content = f.read()
content = re.sub(pattern, replacement, content)
with open(file, "w") as f:
f.write(content)
except UnicodeDecodeError:
pass
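# Illustrative sketch (demo only): _replace feeds plain strings to re.sub; the
# angle-bracket placeholders contain no regex metacharacters, so they match
# literally in every file.
def _sub_demo() -> str:
    text = "import <MODULE_NAME>  # <MODULE_NAME> docs"
    return re.sub("<MODULE_NAME>", "mypkg", text)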
def main():
project_root = Path(os.path.realpath(sys.argv[0])).parent.parent
    module_name = input("What is your python module name (ex: what you would import, no dashes)? ")
pypi_input = input(f"What is your pypi package name? (default: {module_name}) ")
pypi_name = pypi_input or module_name
repo_input = input(f"What is your github project name? (default: {pypi_name}) ")
repo_name = repo_input or pypi_name
rtd_input = input(
f"What is your readthedocs.org project name? (default: {pypi_name}) "
)
rtd_name = rtd_input or pypi_name
project_input = input(
f"What is your project name (ex: at the top of the README)? (default: {repo_name}) "
)
project_name = project_input or repo_name
short_description = input("What is a one-liner describing the project? ")
_replace("<MODULE_NAME>", module_name, project_root)
_replace("<PYPI_NAME>", pypi_name, project_root)
_replace("<REPO_NAME>", repo_name, project_root)
_replace("<RTD_NAME>", rtd_name, project_root)
_replace("<PROJECT_NAME>", project_name, project_root)
_replace("<SHORT_DESCRIPTION>", short_description, project_root)
os.makedirs(project_root / module_name, exist_ok=True)
Path(project_root / module_name / "__init__.py").touch()
Path(project_root / module_name / "py.typed").touch()
if __name__ == "__main__":
main()
|
ethereum/py-evm
|
.project-template/fill_template_vars.py
|
fill_template_vars.py
|
py
| 2,362 |
python
|
en
|
code
| 2,109 |
github-code
|
6
|
20519832620
|
"""!
@brief CCORE Wrapper for X-Means algorithm.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
from ctypes import c_double, c_longlong, c_size_t, c_uint, POINTER
from pyclustering.core.wrapper import ccore_library
from pyclustering.core.pyclustering_package import pyclustering_package, package_extractor, package_builder
def xmeans(sample, centers, kmax, tolerance, criterion, alpha, beta, repeat, random_state, metric_pointer):
random_state = random_state or -1
pointer_data = package_builder(sample, c_double).create()
pointer_centers = package_builder(centers, c_double).create()
ccore = ccore_library.get()
ccore.xmeans_algorithm.restype = POINTER(pyclustering_package)
package = ccore.xmeans_algorithm(pointer_data, pointer_centers, c_size_t(kmax), c_double(tolerance),
c_uint(criterion), c_double(alpha), c_double(beta), c_size_t(repeat),
c_longlong(random_state), metric_pointer)
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result
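# For comparison, a sketch of pyclustering's high-level X-Means API, which ends
# up in this CCORE call; the initial center and kmax below are demo values.
def _xmeans_demo(sample):
    from pyclustering.cluster.xmeans import xmeans as xmeans_cluster
    instance = xmeans_cluster(sample, [[0.0, 0.0]], kmax=10)
    instance.process()
    return instance.get_clusters(), instance.get_centers()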
|
annoviko/pyclustering
|
pyclustering/core/xmeans_wrapper.py
|
xmeans_wrapper.py
|
py
| 1,207 |
python
|
en
|
code
| 1,113 |
github-code
|
6
|
19250997206
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
def SI(img, x, y, p):
    # Sum the (2p)x(2p) neighborhood around (x, y), clipped to [0, 255].
    val = np.sum(img[y-p:y+p, x-p:x+p])
    return min(max(val, 0), 255)
#Read grayscale image and conversion to float64
img=np.float64(cv2.imread('../Image_Pairs/FlowerGarden2.png',0))
(h,w) = img.shape
print("Image dimension:",h,"rows x",w,"columns")
#Direct method
t1 = cv2.getTickCount()
direct_method = cv2.copyMakeBorder(img,0,0,0,0,cv2.BORDER_REPLICATE)
for y in range(1,h):
for x in range(1,w):
val = img[y, x] - img[y-1, x]
direct_method[y,x] = min(max(val,0),255)
t2 = cv2.getTickCount()
time = (t2 - t1)/ cv2.getTickFrequency()
print("Direct method:",time,"s")
plt.figure(figsize=(8, 6))
plt.imshow(direct_method, cmap='gray')
plt.title('Y derivative convolution - Direct method')
plt.axis('off')
plt.savefig("conv_direct_y_derivate.png", bbox_inches='tight')
plt.close()
#Method filter2D
t1 = cv2.getTickCount()
kernel = np.array([-1, 1])
filter2d_method = cv2.filter2D(img,-1,kernel)
t2 = cv2.getTickCount()
time = (t2 - t1)/ cv2.getTickFrequency()
print("Method filter2D :",time,"s")
plt.figure(figsize=(8, 6))
plt.imshow(filter2d_method, cmap='gray')
plt.title('Y derivative convolution - filter2D')
plt.axis('off')
plt.savefig("conv_filter2D_y_derivate.png", bbox_inches='tight')
plt.close()
img_diff = filter2d_method - direct_method
plt.figure(figsize=(8, 6))
plt.imshow(img_diff, cmap='gray', vmax=255, vmin=0)
plt.title("Y derivate result difference between the direct and filter2D")
plt.axis('off')
plt.savefig("difference_y_derivate_direct-filter2D.png", bbox_inches='tight')
plt.close()
center_y = h // 2
center_x = w // 2
p = 1
q = 50
SI_image = cv2.copyMakeBorder(img,0,0,0,0,cv2.BORDER_REPLICATE)
for i in range(-q//2, q//2 + 1, 1):
for j in range(-q//2, q//2 + 1, 1):
SI_image[center_y + i, center_x + j] = SI(img, center_y + i, center_x + j, p)
plt.figure(figsize=(8, 6))
plt.imshow(img, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.savefig("original_image.png", bbox_inches='tight')
plt.close()
plt.figure(figsize=(8, 6))
plt.imshow(SI_image, cmap='gray')
plt.title('SI Function with p=1 on a square of size 50 on the center')
plt.axis('off')
plt.savefig("SI_function.png", bbox_inches='tight')
plt.close()
|
gpspelle/image-mining
|
TP1/TP_Features_OpenCV/modified_Convolutions.py
|
modified_Convolutions.py
|
py
| 2,273 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70613762747
|
# evaluating an arithmetic expression with case-sensitive variable names
# printing integers
x = 5
X = 6
y = 8
Y = 10
equation = (2*x+3*y)*(2*X+3*Y)
print(equation)
# printing strings
small_alpha = "a"
big_alpha = "A"
print(small_alpha)
print(big_alpha)
|
De-sam/Eccowas_College_Classes
|
sam_ss2/hello_world/variables.py
|
variables.py
|
py
| 209 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39626332335
|
import numpy as np
from flask import Flask, request, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))
@app.route('/')
def home():
return render_template("index.html")
@app.route('/predict',methods=['POST'])
def predict():
label = ""
sepallength = request.form["sepallength"]
sepalwidth = request.form["sepalwidth"]
petallength = request.form["petallength"]
    petalwidth = request.form["petalwidth"]
int_features = [sepallength,sepalwidth , petallength ,petalwidth]
final_features = [np.array(int_features)]
prediction = model.predict(final_features)[0]
if prediction == 0 :
label = "Iris-virginica"
elif prediction == 1:
label = "Iris-versicolor"
else:
label = "Iris-setosa"
    return render_template('index.html', prediction_text='Predicted flower: {}'.format(label))
if __name__ == "__main__":
app.run(debug=True)
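# Test sketch (assumes model.pkl is present so the module imports): exercise the
# endpoint with Flask's built-in test client instead of a browser.
def _predict_demo() -> int:
    client = app.test_client()
    form = {'sepallength': 5.1, 'sepalwidth': 3.5,
            'petallength': 1.4, 'petalwidth': 0.2}
    return client.post('/predict', data=form).status_code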
|
Karthicksaga/IRIS
|
app.py
|
app.py
|
py
| 938 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72690099708
|
import sys
from antlr4 import *
from xpathLexer import xpathLexer
from xpathParser import xpathParser
from MyErrorListener import MyErrorListener
import io
def main(argv):
    input_stream = FileStream(argv[1], encoding = 'utf8')
    lexer = xpathLexer(input_stream)
lexer.removeErrorListeners()
lexer.addErrorListener(MyErrorListener())
stream = CommonTokenStream(lexer)
parser = xpathParser(stream)
    try:
        tree = parser.main()
except SyntaxError:
print("\n ++++++ Syntax Error ++++++ \n")
except Exception as e:
print("Error is: " + str(e))
print("\n ++++++ Other Error ++++++ \n")
if __name__ == '__main__':
main(sys.argv)
|
bendrissou/glade-replication
|
antlr4/xpath/parse.py
|
parse.py
|
py
| 952 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10422721293
|
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Any, TypeVar
if TYPE_CHECKING:
from collections.abc import Iterator
T = TypeVar("T", bound=Enum)
def iterate_enum(enum_class: type[T]) -> Iterator[T]:
assert issubclass(enum_class, Enum)
yield from enum_class
def add_long_name(enum_class: type[T], names: dict[T, str]) -> None:
add_per_enum_field(enum_class, "long_name", names)
def add_per_enum_field(enum_class: type[T], field_name: str, names: dict[T, Any]) -> None:
if set(enum_class) != set(names.keys()):
raise ValueError(f"{field_name} for {enum_class} are not synchronized")
for key, value in names.items():
setattr(key, field_name, value)
|
randovania/randovania
|
randovania/lib/enum_lib.py
|
enum_lib.py
|
py
| 739 |
python
|
en
|
code
| 165 |
github-code
|
6
|
11464353853
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 7 13:18:01 2022
@author: sampasmann
"""
import time
import numpy as np
from mpi4py import MPI
from src.functions.save_data import SaveData
from src.solvers.fixed_source.solvers import Picard
from src.solvers.eigenvalue.maps import MatVec_data, MatVec
from scipy.sparse.linalg import gmres, lgmres, bicgstab, LinearOperator
import scipy.linalg as sp
from src.solvers.eigenvalue.maps import SI_Map
# =============================================================================
# Iteration and Residual Storage for Krylov Solvers
# =============================================================================
class gmres_counter(object):
def __init__(self, disp=True):
self._disp = disp
self.iter = 0
self.callbacks = []
def __call__(self, rk=None):
self.callbacks.append(rk.copy())
self.iter += 1
if self._disp:
if (self.iter>1):
print(" Iteration:", self.iter-1, "change: ",
np.linalg.norm((rk - self.callbacks[self.iter-2])))
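# Illustrative sketch (not part of the solver): exercising gmres_counter on a
# tiny dense system with lgmres; the matrix and right-hand side are demo values.
def _gmres_counter_demo():
    A = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([1.0, 2.0])
    counter = gmres_counter(disp=False)
    x, info = lgmres(A, b, callback=counter)
    return x, info, counter.iter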
# =============================================================================
# Power Iteration
# =============================================================================
# TODO: Picard PI is not working
def PowerIteration(qmc_data, solver="LGMRES", max_outter_itt=10,
max_inner_itt=10, outter_tol=1e-5, inner_tol=1e-5,
report_progress=True):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# nproc = comm.Get_size()
itt = 0
k = qmc_data.keff
dk = 1.0
phi_old = qmc_data.tallies.phi_f.copy()
#res_hist = []
k_hist = []
if (rank==0):
print("")
print(" โโโ โโโโโโโ โโโโ โโโโ โโโโโโโ")
print(" โ โโโโโโโโโโโโโโโ โโโโโโโโโโโโโ")
print(" โโโโโโ โโโโโโโโโโโโโโโโโ ")
print(" โโโโโโโโ โโโโโโโโโโโโโโโโโ ")
print(" โโโโโโโโโโโโโโโ โโโ โโโโโโโโโโโ")
print(" โโโ โโโโโโโ โโโ โโโ โโโโโโโ")
print("")
print("--------- K-Effective Eigenvalue Problem ---------")
print("Outter Solver: Power Iteration")
print("Inner Sovler:", solver)
print("Material: ", qmc_data.material_code)
print("Random Number Generator: ", qmc_data.generator)
print("Number of Particles per Iteration: ", qmc_data.N)
print("Number of Spatial Cells: ", qmc_data.Nx)
print("Initial K: ", qmc_data.keff)
# iterate over k effective
while (itt<=max_outter_itt) and (dk>=outter_tol):
# iterate over scattering source
phi_new = InnerIteration(qmc_data, solver=solver,
maxit=max_inner_itt,tol=inner_tol,
report_progress=report_progress)
#phi_hist.append(phi_new)
k_old = k
k = UpdateK(phi_old, phi_new, qmc_data)
k_hist.append(k)
qmc_data.keff = k
#res_hist.append(np.linalg.norm(phi_new-phi_old))
qmc_data.tallies.phi_f = phi_new.copy()
phi_old = phi_new.copy() # /norm(phi_new)
if (qmc_data.source_tilt):
qmc_data.tallies.dphi_f = qmc_data.tallies.dphi_s
dk = abs(k-k_old)
itt += 1
if (rank==0) and (report_progress):
print("**********************")
print("Iteration:", itt)
print("k: ", k)
print("dk: ",dk)
if (rank==0):
if (itt>=max_outter_itt):
print("Power Iteration convergence to tolerance not achieved: Maximum number of iterations.")
elif (dk<=outter_tol):
print("-------------------------------")
print("Successful Power Iteration convergence.")
return phi_new, k_hist, itt #, res_hist
# =============================================================================
# Inner Source Iteration for Power Iteration
# =============================================================================
# TODO: make exitCode an actual output from Picard
def InnerIteration(qmc_data,solver="LGMRES",tol=1e-5,maxit=50,save_data=False,
report_progress=True):
"""
Parameters
----------
qmc_data : TYPE
DESCRIPTION.
tol : TYPE, optional
DESCRIPTION. The default is 1e-5.
maxit : TYPE, optional
DESCRIPTION. The default is 50.
Returns
-------
phi : TYPE
DESCRIPTION.
"""
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nproc = comm.Get_size()
Nx = qmc_data.Nx
G = qmc_data.G
Nv = Nx*G
Nt = qmc_data.Nt
start = time.time()
matvec_data = MatVec_data(qmc_data)
if (qmc_data.source_tilt):
phi0 = np.append(qmc_data.tallies.phi_avg, qmc_data.tallies.dphi_s)
else:
phi0 = qmc_data.tallies.phi_avg
phi0 = np.reshape(phi0,(Nt,1))
if (rank==0) and (report_progress):
print(" Inner Iteration: ")
if (solver=="Picard"):
phi = Picard(qmc_data,tol=tol,maxit=maxit,save_data=False,
report_progress=report_progress)
exitCode = 0
else:
A = LinearOperator((Nt,Nt),
matvec=MatVec,
rmatvec=MatVec,
matmat= MatVec,
rmatmat=MatVec,
dtype=float)
b = matvec_data[0]
if (solver=="LGMRES"):
counter = gmres_counter(disp=report_progress)
gmres_out = lgmres(A,b,x0=phi0,tol=tol,maxiter=maxit, callback=counter)
elif (solver=="GMRES"):
counter = gmres_counter(disp=report_progress)
gmres_out = gmres(A,b,x0=phi0,tol=tol,maxiter=maxit, callback=counter)
elif (solver=="BICGSTAB"):
counter = gmres_counter(disp=report_progress)
gmres_out = bicgstab(A,b,x0=phi0,tol=tol,maxiter=maxit, callback=counter)
else:
print(" Not a valid solver ")
Exception
phi = gmres_out[0]
exitCode = gmres_out[1]
stop = time.time()
run_time = stop - start
if (qmc_data.source_tilt):
phi = phi[:Nv]
phi = np.reshape(phi, (Nx,G))
if (rank==0):
if (save_data):
sim_data = SimData(phi, run_time, tol, nproc)
SaveData(qmc_data, sim_data)
if (exitCode>0) and (report_progress):
print(" Convergence to tolerance not achieved: Maximum number of iterations.")
elif (exitCode<0) and (report_progress):
print(" Illegal input or breakdown.")
elif (exitCode==0) and (report_progress):
print(" Successful convergence.")
return phi
def UpdateK(phi_f, phi_s, qmc_data):
keff = qmc_data.keff
material = qmc_data.material
keff *= (np.sum(material.nu*material.sigf*phi_s)
/np.sum(material.nu*material.sigf*phi_f))
return keff
# =============================================================================
# Davidson's Algorithm
# =============================================================================
# TODO: Correct normalization of scalar flux in Davidson's output
# TODO: Enable Source Tilting with Davidson's
def Davidson(qmc_data, k0=1.0, l=1, m=None, numSweeps=8, tol=1e-6, maxit=30,
report_progress=True):
"""
Parameters
----------
qmc_data : qmc_data structure
k0 : Float, optional
DESCRIPTION. The default is 1.0.
    l : Int, optional
        DESCRIPTION. Number of eigenvalues and vectors to solve for. The default is 1.
    m : Int, optional
        DESCRIPTION. Restart parameter. The default is None (restart disabled).
    numSweeps : Int, optional
        DESCRIPTION. The default is 8.
tol : Float, optional
DESCRIPTION. The default is 1e-6.
maxit : Int, optional
DESCRIPTION. The default is 30.
Returns
-------
phi : TYPE
DESCRIPTION.
keff : TYPE
DESCRIPTION.
itt : TYPE
DESCRIPTION.
"""
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Davidson Parameters
Nt = qmc_data.Nt
if (qmc_data.source_tilt):
phi0 = np.append(qmc_data.tallies.phi_avg, qmc_data.tallies.dphi_s)
else:
phi0 = qmc_data.tallies.phi_avg
phi0 = np.reshape(phi0,(Nt))
# u = qmc_data.tallies.phi_f.reshape(Nt)
V0 = np.array(phi0/np.linalg.norm(phi0).T) # orthonormalize initial guess
V = np.zeros((Nt,maxit))
axv = np.zeros((Nt,maxit))
bxv = np.zeros((Nt,maxit))
Vsize = 1
V[:,0] = V0
k_old = 0.0
dk = 1.0
itt = 1
if (rank==0):
print("")
print(" โโโ โโโโโโโ โโโโ โโโโ โโโโโโโ")
print(" โ โโโโโโโโโโโโโโโ โโโโโโโโโโโโโ")
print(" โโโโโโ โโโโโโโโโโโโโโโโโ ")
print(" โโโโโโโโ โโโโโโโโโโโโโโโโโ ")
print(" โโโโโโโโโโโโโโโ โโโ โโโโโโโโโโโ")
print(" โโโ โโโโโโโ โโโ โโโ โโโโโโโ")
print("")
print("--------- K-Effective Eigenvalue Problem ---------")
print("Outter Solver: Davidson's Method")
print("Material: ", qmc_data.material_code)
print("Random Number Generator: ", qmc_data.generator)
print("Number of Particles per Iteration: ", qmc_data.N)
print("Number of Spatial Cells: ", qmc_data.Nx)
print("Initial K: ", qmc_data.keff)
if (m is None):
m = maxit+1 # unless specified there is no restart parameter
V[:,:Vsize] = PreConditioner(V[:,:Vsize], qmc_data, numSweeps)
# Davidson Routine
while (itt <= maxit) and (dk>=tol):
#print(V)
if (report_progress):
print("**********************")
print(" Davidson Iteration: ", itt)
axv[:,Vsize-1] = AxV(V[:,:Vsize], qmc_data)[:,0]
bxv[:,Vsize-1] = BxV(V[:,:Vsize], qmc_data)[:,0]
AV = np.dot(V[:,:Vsize].T, axv[:,:Vsize]) # Scattering linear operator
BV = np.dot(V[:,:Vsize].T, bxv[:,:Vsize]) # Fission linear operator
[Lambda, w] = sp.eig(AV, b=BV) # solve for eigenvalues and vectors
idx = Lambda.argsort() # get indices of eigenvalues from smallest to largest
        Lambda = Lambda[idx] # sort eigenvalues from smallest to largest
        assert not Lambda.imag.any() # there can't be any imaginary eigenvalues
        Lambda = Lambda[:l].real # take the real part of the l smallest eigenvalues (largest k)
k = 1/Lambda
dk = abs(k - k_old)
if (report_progress):
print("K Effective: ", k)
print("dk: ",dk)
k_old = k
w = w[:,idx] # sort corresponding eigenvector
w = w[:,:l].real # take the l largest eigenvectors
u = np.dot(V[:,:Vsize],w) # Ritz vectors
res = AxV(u, qmc_data) - Lambda*BxV(u, qmc_data) # residual
t = PreConditioner(res, qmc_data, numSweeps)
if (Vsize <= m-l ):
Vsize += 1
V[:,:Vsize] = Gram(V[:,:Vsize-1],t) # appends new orthogonalization to V
else:
Vsize = 2
V[:,:Vsize] = Gram(u,t) # "restarts" by appending to a new array
        if (itt == maxit):
            print(" Convergence to tolerance not achieved: Maximum number of iterations.")
            break
        itt += 1
    if (dk < tol):
        print(" Successful convergence.")
keff = 1/Lambda
phi = V[:,0]
phi = phi/np.linalg.norm(phi).T
return phi, keff, itt
# =============================================================================
# Functions for Davidson's Method
# =============================================================================
def AxV(V, qmc_data):
"""
Linear operator for scattering term (I-L^(-1)S)*phi
"""
v = V[:,-1]
Nx = qmc_data.Nx
G = qmc_data.G
Nt = qmc_data.Nt
zed = np.zeros((Nx,G))
phi_in = np.reshape(v, (Nt,1))
axv = (phi_in - SI_Map(zed, phi_in, qmc_data))
return axv
def BxV(V, qmc_data):
"""
Linear operator for fission term (L^(-1)F*phi)
"""
v = V[:,-1]
Nx = qmc_data.Nx
G = qmc_data.G
Nv = int(Nx*G)
Nt = qmc_data.Nt
zed = np.zeros(Nt)
phi_in = np.reshape(v, (Nt,1))
if (qmc_data.source_tilt):
dphi = qmc_data.tallies.dphi_s
qmc_data.tallies.dphi_s = zed
bxv = SI_Map(phi_in, zed, qmc_data)
if (qmc_data.source_tilt):
qmc_data.tallies.dphi_s = dphi
v[Nv:] = dphi.reshape(Nv)
return bxv
def PreConditioner(V, qmc_data, numSweeps=8):
"""
Linear operator approximation of L^(-1)S
In this case the preconditioner is a specified number of purely scattering
transport sweeps.
"""
v = V[:,-1]
Nx = qmc_data.Nx
G = qmc_data.G
Nt = qmc_data.Nt
Nv = Nx*G
zed = np.zeros((Nx,G))
phi_in = np.reshape(v, (Nt,1))
for i in range(numSweeps):
phi_in = SI_Map(zed, phi_in, qmc_data)
return phi_in
def Gram(V,u):
"""
Modified Gram Schmidt
"""
w1 = u - np.dot(V,np.dot(V.T,u))
v1 = w1 / np.linalg.norm(w1)
w2 = v1 - np.dot(V,np.dot(V.T,v1))
v2 = w2 / np.linalg.norm(w2)
V = np.append(V, v2, axis=1)
return V
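# Illustrative check (demo only): the column Gram appends should be orthonormal
# to the existing basis, so V.T @ V stays close to the identity.
def _gram_demo() -> bool:
    rng = np.random.default_rng(0)
    V, _ = np.linalg.qr(rng.standard_normal((6, 2)))  # orthonormal start
    u = rng.standard_normal((6, 1))
    V2 = Gram(V, u)
    return np.allclose(V2.T @ V2, np.eye(V2.shape[1]), atol=1e-10)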
# =============================================================================
# Misc Functions
# =============================================================================
def SimData(phi, time, tol, nproc):
data = {
"phi": phi,
"run_time": time,
"tolerance": tol,
"nproc": nproc
}
return data
|
spasmann/iQMC
|
src/solvers/eigenvalue/solvers.py
|
solvers.py
|
py
| 14,940 |
python
|
en
|
code
| 2 |
github-code
|
6
|
19209409927
|
import re
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# Dictionary mapping word contractions to their full words
contractions = {
"ain't": "are not","'s":" is","aren't": "are not",
"can't": "cannot","can't've": "cannot have",
"'cause": "because","could've": "could have","couldn't": "could not",
"couldn't've": "could not have", "didn't": "did not","doesn't": "does not",
"don't": "do not","hadn't": "had not","hadn't've": "had not have",
"hasn't": "has not","haven't": "have not","he'd": "he would",
"he'd've": "he would have","he'll": "he will", "he'll've": "he will have",
"how'd": "how did","how'd'y": "how do you","how'll": "how will",
"I'd": "I would", "I'd've": "I would have","I'll": "I will",
"I'll've": "I will have","I'm": "I am","I've": "I have", "isn't": "is not",
"it'd": "it would","it'd've": "it would have","it'll": "it will",
"it'll've": "it will have", "let's": "let us","ma'am": "madam",
"mayn't": "may not","might've": "might have","mightn't": "might not",
"mightn't've": "might not have","must've": "must have","mustn't": "must not",
"mustn't've": "must not have", "needn't": "need not",
"needn't've": "need not have","o'clock": "of the clock","oughtn't": "ought not",
"oughtn't've": "ought not have","shan't": "shall not","sha'n't": "shall not",
"shan't've": "shall not have","she'd": "she would","she'd've": "she would have",
"she'll": "she will", "she'll've": "she will have","should've": "should have",
"shouldn't": "should not", "shouldn't've": "should not have","so've": "so have",
"that'd": "that would","that'd've": "that would have", "there'd": "there would",
"there'd've": "there would have", "they'd": "they would",
"they'd've": "they would have","they'll": "they will",
"they'll've": "they will have", "they're": "they are","they've": "they have",
"to've": "to have","wasn't": "was not","we'd": "we would",
"we'd've": "we would have","we'll": "we will","we'll've": "we will have",
"we're": "we are","we've": "we have", "weren't": "were not","what'll": "what will",
"what'll've": "what will have","what're": "what are", "what've": "what have",
"when've": "when have","where'd": "where did", "where've": "where have",
"who'll": "who will","who'll've": "who will have","who've": "who have",
"why've": "why have","will've": "will have","won't": "will not",
"won't've": "will not have", "would've": "would have","wouldn't": "would not",
"wouldn't've": "would not have","y'all": "you all", "y'all'd": "you all would",
"y'all'd've": "you all would have","y'all're": "you all are",
"y'all've": "you all have", "you'd": "you would","you'd've": "you would have",
"you'll": "you will","you'll've": "you will have", "you're": "you are",
"you've": "you have"
}
STOPWORDS = stopwords.words('english')
meaningless_words = ['hotel','stay','hilton','location','room','service','airport','staff','london','night','flight','overnight','rooms', 'experience','gatwick','ever','holiday','one', 'stayed','would','breakfast','bed','check','get','us','time','reception','terminal','bar','food','booked','walk','bathroom', 'really','early','could','also','restaurant','morning','even','floor','next','back','day','two', 'got','executive','south','shower','first','long','need','area', 'minutes','lounge','went','much','told','sleep', 'arrived','hotels','work','station','nights','beds', 'quite','bit','go','people','car']
for word in meaningless_words:
STOPWORDS.append(word)
# Remove punctutation marks, stopwords, emojis, urls, convert to lowercase, expand contractions, hashtags, retweet
def preprocess_review(review):
res_review = []
lemmatizer = WordNetLemmatizer()
for word in review.split():
# Convert to lowercase
word = word.lower()
# Expand Contractions
word = contractions.get(word, word)
for w in word.split(" "):
# Remove stopwords
if w not in STOPWORDS:
# w = splitter.split(w)
# Remove punctuation
w = re.sub(r'[^\w\s]', '', str(w))
# Remove numbers
w = re.sub(r'\d+', '', w)
# Lemmatize the word
w = lemmatizer.lemmatize(w, pos='v')
if w != '':
res_review.append(w)
return ' '.join([word for word in res_review])
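# Usage sketch (assumes the NLTK corpora are downloaded, e.g. via
# nltk.download('stopwords') and nltk.download('wordnet')):
def _preprocess_demo() -> str:
    sample = "The staff couldn't have been nicer, but the Room was noisy!"
    return preprocess_review(sample)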
|
kelvinchumbe/Hotel-Review-Mining-and-Web-App
|
Hotel Review Mining/Web App Deployment/api/preprocessing_utils.py
|
preprocessing_utils.py
|
py
| 4,650 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71281284349
|
# Create a MySQL table from an Excel score sheet and insert the rows
import pandas as pd
import pymysql
xl_file = '/Users/JaehoByun/JB/_School/2021_2 데이터사이언스/과제/score.xlsx'
df = pd.read_excel(xl_file)
conn = pymysql.connect(host='localhost', user='root', password='chunjay606', db='data_science')
curs = conn.cursor(pymysql.cursors.DictCursor)
# Create the score table
mk_table_sql = """create table if not exists score
(sno int primary key,
attendance float,
homework float,
discussion int,
midterm float,
final float,
score float,
grade varchar(3))"""
curs.execute(mk_table_sql)
# Insert the data
insert_sql = """insert into score(sno, attendance, homework, discussion, midterm, final, score, grade)
values (%s, %s, %s, %s, %s, %s, %s, %s)"""
for idx in range(len(df)):
curs.execute(insert_sql, tuple(df.values[idx]))
conn.commit()
# Verify the inserted rows
show_table_sql = 'select * from score'
curs.execute(show_table_sql)
row = curs.fetchone()
while row:
print(row)
row = curs.fetchone()
curs.close()
conn.close()
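# Alternative sketch: batch all rows into one executemany() call, which is
# usually faster than executing the insert row by row (same insert_sql).
def bulk_insert(cursor, frame):
    rows = [tuple(r) for r in frame.values]
    cursor.executemany(insert_sql, rows)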
|
bjho606/python_school-data-science
|
score_assignment2.py
|
score_assignment2.py
|
py
| 1,130 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14466536313
|
'''
Given a collection of distinct integers, return all possible permutations.
Example:
Input: [1,2,3]
Output:
[
[1,2,3],
[1,3,2],
[2,1,3],
[2,3,1],
[3,1,2],
[3,2,1]
]
'''
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
sol = []
self.backtrack(nums, 0, sol)
return sol
def backtrack(self, nums, first, sol):
if len(nums) == first:
sol.append(nums[:])
for i in range(first, len(nums)):
nums[first], nums[i] = nums[i], nums[first]
self.backtrack(nums, first+1, sol)
nums[first], nums[i] = nums[i], nums[first]
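# For reference, a sketch of the same result via the standard library; the
# backtracking version above swaps in place instead of materializing tuples.
def permute_itertools(nums):
    from itertools import permutations
    return [list(p) for p in permutations(nums)]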
|
loganyu/leetcode
|
problems/046_permutations.py
|
046_permutations.py
|
py
| 671 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30366445741
|
from traits.api import Bool, Instance, Float, Property
# Local relative imports
from .abstract_mapper import AbstractMapper
from .data_range_1d import DataRange1D
class Base1DMapper(AbstractMapper):
"""Defines an abstract mapping from a 1-D region in input space to a 1-D
region in output space.
"""
#: The data-space bounds of the mapper.
range = Instance(DataRange1D)
#: The screen space position of the lower bound of the data space.
low_pos = Float(0.0)
#: The screen space position of the upper bound of the data space.
high_pos = Float(1.0)
#: Convenience property to get low and high positions in one structure.
#: Must be a tuple (low_pos, high_pos).
screen_bounds = Property
#: Should the mapper stretch the dataspace when its screen space bounds are
#: modified (default), or should it preserve the screen-to-data ratio and
#: resize the data bounds? If the latter, it will only try to preserve
#: the ratio if both screen and data space extents are non-zero.
stretch_data = Bool(True)
#: The sign of the mapping: 1 if deltas match sign, -1 if opposite sign
sign = Property
# If the subclass uses a cache, _cache_valid is maintained to
# monitor its status
_cache_valid = Bool(False, transient=True)
# Indicates whether or not the bounds have been set at all, or if they
# are at their initial default values.
_low_bound_initialized = Bool(False)
_high_bound_initialized = Bool(False)
# ------------------------------------------------------------------------
# Event handlers
# ------------------------------------------------------------------------
def _low_pos_changed(self, old, new):
self._cache_valid = False
if not self.stretch_data:
self._adjust_range((old, self.high_pos), (new, self.high_pos))
self._low_bound_initialized = True
self.updated = True
def _high_pos_changed(self, old, new):
self._cache_valid = False
if not self.stretch_data:
self._adjust_range((self.low_pos, old), (self.low_pos, new))
self._high_bound_initialized = True
self.updated = True
def _range_changed(self, old, new):
if old is not None:
old.observe(self._range_change_handler, "updated", remove=True)
if new is not None:
new.observe(self._range_change_handler, "updated")
self._cache_valid = False
self.updated = new
def _range_change_handler(self, event):
"Handles the range changing; dynamically attached to our ranges"
self._cache_valid = False
self.updated = event.object
def _get_screen_bounds(self):
return (self.low_pos, self.high_pos)
def _get_sign(self):
delta_screen = self.high_pos - self.low_pos
delta_data = self.range.high - self.range.low
if delta_screen == 0 or delta_data == 0:
return 0
elif delta_screen / float(delta_data) < 0:
return -1
else:
return 1
def _set_screen_bounds(self, new_bounds):
if new_bounds[0] == self.low_pos and new_bounds[1] == self.high_pos:
return
if not self.stretch_data:
self._adjust_range((self.low_pos, self.high_pos), new_bounds)
self.trait_setq(low_pos=new_bounds[0])
self.trait_setq(high_pos=new_bounds[1])
self._cache_valid = False
self._low_bound_initialized = True
self._high_bound_initialized = True
self.updated = True
def _adjust_range(self, old_bounds, new_bounds):
initialized = (
self._low_bound_initialized and self._high_bound_initialized
)
if self.range is not None and initialized:
rangelow = self.range.low
rangehigh = self.range.high
d_data = rangehigh - rangelow
old_d_screen = old_bounds[1] - old_bounds[0]
if d_data != 0 and old_d_screen != 0:
new_data_extent = (
d_data / old_d_screen * (new_bounds[1] - new_bounds[0])
)
self.range.set_bounds(rangelow, rangelow + new_data_extent)
|
enthought/chaco
|
chaco/base_1d_mapper.py
|
base_1d_mapper.py
|
py
| 4,221 |
python
|
en
|
code
| 286 |
github-code
|
6
|
17635143913
|
from collections import Counter
import pandas as pd
def transform(new_subjects):
    counts = Counter(new_subjects)
    list_keys = list(counts.keys())
    list_values = list(counts.values())
df_keys = pd.DataFrame(list_keys, columns=['subject'])
df_values = pd.DataFrame(list_values, columns=['frequency'])
df_arxiv = pd.concat([df_keys, df_values], axis=1)
df_arxiv['frequency'] = pd.to_numeric(df_arxiv['frequency'])
df_arxiv = df_arxiv.sort_values(by=['frequency'], ascending=False)
return df_arxiv
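# Equivalent one-liner sketch: pandas can build the same frequency table
# directly, already sorted in descending order.
def transform_value_counts(new_subjects):
    counts = pd.Series(new_subjects).value_counts()
    return counts.rename_axis('subject').reset_index(name='frequency')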
|
ThomasKranz/arxiv_ETL
|
src/transformer.py
|
transformer.py
|
py
| 526 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2725937228
|
n = int(input())
for _ in range(n):
counter = 0
t = int(input())
tiros = [int(x) for x in input().split()]
pulos = input()
for j in range(t):
if (tiros[j] <= 2 and pulos[j] == 'S') or (tiros[j] > 2 and pulos[j] == 'J'):
counter += 1
print(counter)
|
wolney-fo/beecrowd
|
2-AD-HOC/python/beecrowd_1250.py
|
beecrowd_1250.py
|
py
| 304 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40546336482
|
import dlib
import os
import numpy as np
import matplotlib.pyplot as plt
"""
ๆญคๆไปถไธบๆญฃๅไบบ่ธๆฃๆตๆจกๅ๏ผ้็จdlibๅฎ็ฐ
"""
def _shape_to_np(shape):
xy = []
for i in range(68):
xy.append((shape.part(i).x, shape.part(i).y,))
xy = np.asarray(xy, dtype='float32')
return xy
def get_landmarks(img, detector, predictor, PlotOn=False):
"""
่ทๅไบบ่ธ็นๅพ็น
"""
lmarks = []
dets, scores, idx = detector.run(img, 1)
# dets = [dlib.rectangle(left=0, top=0, right=img.shape[1], bottom=img.shape[0])]
print("Number of faces detected: {}".format(len(dets)))
if len(dets) > 0:
shapes = []
for k, det in enumerate(dets):
shape = predictor(img, det)
shapes.append(shape)
xy = _shape_to_np(shape)
lmarks.append(xy)
lmarks = np.asarray(lmarks, dtype='float32')
lmarks = lmarks[0, :, :].T
if PlotOn:
display_landmarks(img, lmarks)
return lmarks
else:
return lmarks
def display_landmarks(img, lmarks):
for i in range(68):
xy = lmarks[:, i]
plt.plot(xy[0], xy[1], 'ro')
plt.text(xy[0], xy[1], str(i))
plt.imshow(img)
plt.show()
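# Usage sketch: wiring up the dlib models this module expects; the predictor
# file name is the standard 68-point model, adjust the path as needed.
def _landmarks_demo(image_path='face.jpg'):
    import cv2
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    img = cv2.imread(image_path)
    return get_landmarks(img, detector, predictor, PlotOn=False)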
|
hamster1963/face-all-in-one-machine-backend
|
face_irobot_main/facial_feature_detector.py
|
facial_feature_detector.py
|
py
| 1,246 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27103152939
|
from itertools import product
import numpy as np
import pytest
from dcegm.pre_processing.params import process_params
from numpy.testing import assert_array_almost_equal as aaae
from scipy.special import roots_sh_legendre
from scipy.stats import norm
from toy_models.consumption_retirement_model.budget_functions import (
_calc_stochastic_income,
)
from toy_models.consumption_retirement_model.budget_functions import budget_constraint
model = ["deaton", "retirement_taste_shocks", "retirement_no_taste_shocks"]
labor_choice = [0, 1]
period = [0, 5, 7]
max_wealth = [11, 33, 50]
n_grid_points = [101, 444, 1000]
TEST_CASES = list(product(model, period, labor_choice, max_wealth, n_grid_points))
@pytest.mark.parametrize(
"model, period, labor_choice, max_wealth, n_grid_points", TEST_CASES
)
def test_get_beginning_of_period_wealth(
model, period, labor_choice, max_wealth, n_grid_points, load_example_model
):
params, options = load_example_model(f"{model}")
params = process_params(params)
sigma = params["sigma"]
r = params["interest_rate"]
consump_floor = params["consumption_floor"]
n_quad_points = options["quadrature_points_stochastic"]
child_state_dict = {"period": period, "lagged_choice": labor_choice}
savings_grid = np.linspace(0, max_wealth, n_grid_points)
_quad_points, _ = roots_sh_legendre(n_quad_points)
quad_points = norm.ppf(_quad_points) * sigma
random_saving_scalar = np.random.randint(0, n_grid_points)
random_shock_scalar = np.random.randint(0, n_quad_points)
wealth_beginning_of_period = budget_constraint(
**child_state_dict,
savings_end_of_previous_period=savings_grid[random_saving_scalar],
income_shock_previous_period=quad_points[random_shock_scalar],
options=options,
params=params,
)
_labor_income = _calc_stochastic_income(
**child_state_dict,
wage_shock=quad_points[random_shock_scalar],
min_age=options["min_age"],
constant=params["constant"],
exp=params["exp"],
exp_squared=params["exp_squared"],
)
budget_expected = (1 + r) * savings_grid[random_saving_scalar] + _labor_income
aaae(wealth_beginning_of_period, max(consump_floor, budget_expected))
|
OpenSourceEconomics/dcegm
|
tests/test_budget_equation.py
|
test_budget_equation.py
|
py
| 2,269 |
python
|
en
|
code
| 15 |
github-code
|
6
|
910639080
|
import numpy as np
from horton.io.utils import set_four_index_element
__all__ = ['load_fcidump', 'dump_fcidump']
def load_fcidump(filename):
'''Read one- and two-electron integrals from a Molpro 2012 FCIDUMP file.
Works only for restricted wavefunctions.
Keep in mind that the FCIDUMP format changed in Molpro 2012, so files generated with
older versions are not supported.
Parameters
----------
filename : str
The filename of the fcidump file.
Returns
-------
results : dict
Data loaded from the file, with keys: ``nelec``, ``ms2``, ``one_mo``, ``two_mo``,
``core_energy``.
'''
with open(filename) as f:
# check header
line = next(f)
if not line.startswith(' &FCI NORB='):
raise IOError('Error in FCIDUMP file header')
# read info from header
words = line[5:].split(',')
header_info = {}
for word in words:
if word.count('=') == 1:
key, value = word.split('=')
header_info[key.strip()] = value.strip()
nbasis = int(header_info['NORB'])
nelec = int(header_info['NELEC'])
ms2 = int(header_info['MS2'])
# skip rest of header
for line in f:
words = line.split()
if words[0] == "&END" or words[0] == "/END" or words[0]=="/":
break
# read the integrals
one_mo = np.zeros((nbasis, nbasis))
two_mo = np.zeros((nbasis, nbasis, nbasis, nbasis))
core_energy = 0.0
for line in f:
words = line.split()
if len(words) != 5:
raise IOError('Expecting 5 fields on each data line in FCIDUMP')
value = float(words[0])
if words[3] != '0':
ii = int(words[1])-1
ij = int(words[2])-1
ik = int(words[3])-1
il = int(words[4])-1
# Uncomment the following line if you want to assert that the
# FCIDUMP file does not contain duplicate 4-index entries.
#assert two_mo.get_element(ii,ik,ij,il) == 0.0
set_four_index_element(two_mo, ii, ik, ij, il, value)
elif words[1] != '0':
ii = int(words[1])-1
ij = int(words[2])-1
one_mo[ii, ij] = value
one_mo[ij, ii] = value
else:
core_energy = value
return {
'nelec': nelec,
'ms2': ms2,
'one_mo': one_mo,
'two_mo': two_mo,
'core_energy': core_energy,
}
def dump_fcidump(filename, data):
'''Write one- and two-electron integrals in the Molpro 2012 FCIDUMP format.
Works only for restricted wavefunctions.
    Keep in mind that the FCIDUMP format changed in Molpro 2012, so files
    written with this function cannot be used with older versions of Molpro.
    Parameters
---------
filename : str
The filename of the FCIDUMP file. This is usually "FCIDUMP".
data : IOData
Must contain ``one_mo``, ``two_mo``. May contain ``core_energy``, ``nelec`` and
``ms``.
'''
with open(filename, 'w') as f:
one_mo = data.one_mo
two_mo = data.two_mo
nactive = one_mo.shape[0]
core_energy = getattr(data, 'core_energy', 0.0)
nelec = getattr(data, 'nelec', 0)
ms2 = getattr(data, 'ms2', 0)
# Write header
print(' &FCI NORB=%i,NELEC=%i,MS2=%i,' % (nactive, nelec, ms2), file=f)
print(' ORBSYM= '+",".join(str(1) for v in range(nactive))+",", file=f)
print(' ISYM=1', file=f)
print(' &END', file=f)
# Write integrals and core energy
for i in range(nactive):
for j in range(i+1):
for k in range(nactive):
for l in range(k+1):
if (i*(i+1))/2+j >= (k*(k+1))/2+l:
value = two_mo[i, k, j, l]
if value != 0.0:
print('%23.16e %4i %4i %4i %4i' % (value, i+1, j+1, k+1, l+1), file=f)
for i in range(nactive):
for j in range(i+1):
value = one_mo[i, j]
if value != 0.0:
print('%23.16e %4i %4i %4i %4i' % (value, i+1, j+1, 0, 0), file=f)
if core_energy != 0.0:
print('%23.16e %4i %4i %4i %4i' % (core_energy, 0, 0, 0, 0), file=f)
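# Round-trip sketch (demo only): load_fcidump returns a plain dict while
# dump_fcidump reads attributes, so SimpleNamespace bridges the two.
def _fcidump_roundtrip(path='FCIDUMP'):
    from types import SimpleNamespace
    data = load_fcidump(path)
    dump_fcidump(path + '.copy', SimpleNamespace(**data))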
|
theochem/horton
|
horton/io/molpro.py
|
molpro.py
|
py
| 4,488 |
python
|
en
|
code
| 83 |
github-code
|
6
|
39485957424
|
"""ะะพะดัะปั ะฑะฐะทั ะดะฐะฝะฝัั
ั
ัะฐะฝััะตะน ะฟะพะปัะทะพะฒะฐัะตะปะตะน ะธ ะธั
ะธััะพัะธั"""
import datetime as dt
from typing import Optional
import enum
from functools import cached_property
import sqlalchemy as sa
from sqlalchemy import create_engine, select, ForeignKey
from sqlalchemy.orm import (
Session,
DeclarativeBase,
Mapped,
mapped_column,
sessionmaker,
relationship
)
from .. import settings
from ..server.auth import AuthMixin
engine = create_engine(settings.database_path)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
class Base(DeclarativeBase):
pass
class User(Base, AuthMixin):
__tablename__ = 'user'
id: Mapped[int] = mapped_column(primary_key=True)
account_name: Mapped[str]
password: Mapped[str]
has_entered: Mapped[Optional[bool]] = mapped_column(default=False)
histories: Mapped[list['History']] = relationship(
back_populates='user',
cascade='all, delete'
)
contacts: Mapped[list['Contact']] = relationship(
back_populates='user',
foreign_keys='Contact.user_id'
)
friends_with_us: Mapped[list['Contact']] = relationship(
back_populates='user',
foreign_keys='Contact.friend_id'
)
@property
def friends(self) -> list['User']:
session = Session.object_session(self)
subq = select(Contact).where(Contact.user_id == self.id).subquery()
stmt = select(User).join(subq, User.id == subq.c.friend_id)
return session.scalars(stmt).all()
@cached_property
def user_service(self):
from ..server.user_service import UserService
session = Session.object_session(self)
return UserService(session)
def _get_last_event_time(self, event: 'History.Event'):
session = Session.object_session(self)
stm = (
select(History.time)
.filter_by(user_id=self.id, event=event)
.order_by(History.time.desc())
)
result = session.scalars(stm).first()
return result
@property
def last_login(self):
return self._get_last_event_time(event=History.Event.login)
@property
def last_logout(self):
return self._get_last_event_time(event=History.Event.logout)
@property
def last_send_message(self):
return self._get_last_event_time(event=History.Event.user_send_message_to_server)
@property
def last_get_message(self):
return self._get_last_event_time(event=History.Event.user_get_message_from_server)
def __repr__(self):
return (
f'User(id={self.id}, account_name={self.account_name}),'
)
    def is_online(self):
        return bool(self.has_entered)
def check_password(self, password: str):
return self.password == password
class History(Base):
__tablename__ = 'history'
class Event(str, enum.Enum):
login = 'login'
logout = 'logout'
user_send_message_to_server = 'user_send_message_to_server'
user_get_message_from_server = 'user_get_message_from_server'
id: Mapped[int] = mapped_column(primary_key=True)
user_id: Mapped[int | None] = mapped_column(ForeignKey('user.id'))
user: Mapped[User | None] = relationship(back_populates='histories')
event: Mapped[Event] = mapped_column(sa.Enum(Event))
time: Mapped[dt.datetime]
adress: Mapped[str | None]
class Contact(Base):
__tablename__ = 'contact'
id: Mapped[int] = mapped_column(primary_key=True)
user_id: Mapped[int] = mapped_column(
ForeignKey('user.id')
)
user: Mapped[User] = relationship(
back_populates='contacts',
foreign_keys=[user_id]
)
friend_id: Mapped[int] = mapped_column(
ForeignKey('user.id')
)
friend: Mapped[User] = relationship(
back_populates='friends_with_us',
foreign_keys=[friend_id]
)
Base.metadata.create_all(bind=engine)
def create_test_data():
from ..server import test_data
with SessionLocal() as session:
creator = test_data.TestData(session)
creator.create_data_if_not_exist(session)
create_test_data()
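# Usage sketch: looking up a user through the session factory defined above.
def get_user_by_name(account_name: str) -> User | None:
    with SessionLocal() as session:
        stmt = select(User).filter_by(account_name=account_name)
        return session.scalars(stmt).first()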
|
DemidovEvg/async_chat
|
src/nano_async_chat/async_chat/server/db.py
|
db.py
|
py
| 4,316 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38435402089
|
import json
import pandas
def read_json(filename: str) -> dict:
    try:
        with open(filename, "r") as f:
            data = json.loads(f.read())
    except (OSError, json.JSONDecodeError) as exc:
        raise Exception(f"Reading {filename} file encountered an error") from exc
    return data
def create_dataframe(data: dict) -> pandas.DataFrame:
# Declare an empty dataframe to append records
dataframe = pandas.DataFrame()
# Looping through each record
for d in data['workers']:
# Normalize the column levels
name_details = pandas.json_normalize(d, record_path=['nameDetails'])
contact_details = pandas.json_normalize(d, record_path=['phoneContactDetails'])
email = pandas.json_normalize(d, record_path=['emailContactDetails'],meta=[['employmentSummary','createAccessDate'],['employmentSummary','createAccessTime'],['employmentSummary','mostRecentHireDate']])
record = pandas.json_normalize(d, record_path=['addressDetails'])
job_details = pandas.json_normalize(d, record_path=['jobDetails'],meta=['workerIdentifier'])
new = pandas.concat([name_details,contact_details,email,record,job_details],axis=1,join='inner')
        # Append it to the dataframe (DataFrame.append was removed in pandas 2)
        dataframe = pandas.concat([dataframe, new], ignore_index=True)
return dataframe
def main():
# Read the JSON file as python dictionary
data = read_json(filename="work.json")
# Generate the dataframe for the array items in
# details key
dataframe = create_dataframe(data=data['workerDataResponse'])
# Renaming columns of the dataframe
dataframe.rename(columns={
"employmentSummary.createAccessDate": "accessDate",
"employmentSummary.createAccessTime": "accessTime",
"employmentSummary.mostRecentHireDate": "mostRecentHireDate",
"employmentJobProfileDetails.jobProfileIdentifier": "jobProfileIdentifier",
"jobGovernanceRoleDetails.functionalManagerWorkerIdentifier": "functionalManagerWorkerIdentifier",
"organizationDetails.companyOrganizationIdentifier":"companyOrganizationIdentifier"
}, inplace=True)
# Convert dataframe to CSV
dataframe.to_csv("emp_data.csv", index=False)
if __name__ == '__main__':
main()
|
PrasadWakle/jsontocsv
|
jsontocsv.py
|
jsontocsv.py
|
py
| 2,340 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17977750270
|
import asyncio
import pickle
import unittest
from typing import AbstractSet, Any, Mapping, Sequence, Union
from testing.types import (
Digits,
I32List,
Integers,
SetI32,
StringBucket,
StrStrMap,
easy,
hard,
)
from thrift.py3.common import Protocol
from thrift.py3.exceptions import Error
from thrift.py3.serializer import (
Transform,
deserialize,
deserialize_from_header,
deserialize_with_length,
serialize,
serialize_iobuf,
serialize_with_header,
serialize_with_header_iobuf,
)
from thrift.py3.types import Struct
class SerializerTests(unittest.TestCase):
def test_with_header_bytes(self) -> None:
control = easy(val=5, val_list=[4, 3, 2, 1])
buf = serialize_with_header(control, transform=Transform.ZSTD_TRANSFORM)
decoded = deserialize_from_header(easy, buf)
self.assertEqual(control, decoded)
def test_with_header_iobuf(self) -> None:
control = easy(val=5, val_list=[4, 3, 2, 1])
iobuf = serialize_with_header_iobuf(control, transform=Transform.ZSTD_TRANSFORM)
decoded = deserialize_from_header(easy, iobuf)
self.assertEqual(control, decoded)
def test_with_header_iobuf_binary(self) -> None:
control = easy(val=6, val_list=[5, 4, 3, 2, 1])
iobuf = serialize_with_header_iobuf(
control, protocol=Protocol.BINARY, transform=Transform.ZLIB_TRANSFORM
)
decoded = deserialize_from_header(easy, iobuf)
self.assertEqual(control, decoded)
def test_with_header_iobuf_json(self) -> None:
control = easy(val=4, val_list=[3, 2, 1])
iobuf = serialize_with_header_iobuf(control, protocol=Protocol.JSON)
decoded = deserialize_from_header(easy, iobuf)
self.assertEqual(control, decoded)
def test_None(self) -> None:
with self.assertRaises(TypeError):
serialize(None, Protocol.JSON) # type: ignore
def test_sanity(self) -> None:
with self.assertRaises(TypeError):
serialize(1, Protocol.COMPACT) # type: ignore
with self.assertRaises(TypeError):
serialize(easy(), None) # type: ignore
with self.assertRaises(TypeError):
deserialize(Protocol, b"") # type: ignore
with self.assertRaises(TypeError):
deserialize(easy, Protocol) # type: ignore
def test_from_thread_pool(self) -> None:
control = easy(val=5, val_list=[1, 2, 3, 4])
loop = asyncio.get_event_loop()
coro = loop.run_in_executor(None, serialize, control)
encoded = loop.run_until_complete(coro)
coro = loop.run_in_executor(None, deserialize, type(control), encoded)
decoded = loop.run_until_complete(coro)
self.assertEqual(control, decoded)
def test_serialize_iobuf(self) -> None:
control = easy(val=5, val_list=[1, 2, 3, 4, 5])
iobuf = serialize_iobuf(control)
decoded = deserialize(type(control), iobuf)
self.assertEqual(control, decoded)
def test_bad_deserialize(self) -> None:
with self.assertRaises(Error):
deserialize(easy, b"", protocol=Protocol.JSON)
with self.assertRaises(Error):
deserialize(easy, b"\x05AAAAAAAA")
with self.assertRaises(Error):
deserialize(easy, b"\x02\xDE\xAD\xBE\xEF", protocol=Protocol.BINARY)
def thrift_serialization_round_robin(
self, control: Struct, fixtures: Mapping[Protocol, bytes]
) -> None:
for proto in Protocol:
encoded = serialize(control, protocol=proto)
self.assertIsInstance(encoded, bytes)
decoded = deserialize(type(control), encoded, protocol=proto)
self.assertIsInstance(decoded, type(control))
self.assertEqual(control, decoded)
self.assertEqual((proto, encoded), (proto, fixtures.get(proto)))
def pickle_round_robin(
self,
# pyre-fixme[2]: Parameter annotation cannot contain `Any`.
control: Union[Struct, Mapping[Any, Any], Sequence[Any], AbstractSet[Any]],
) -> None:
encoded = pickle.dumps(control, protocol=pickle.HIGHEST_PROTOCOL)
decoded = pickle.loads(encoded)
self.assertIsInstance(decoded, type(control))
self.assertEqual(control, decoded)
def test_serialize_easy_struct(self) -> None:
control = easy(val=5, val_list=[1, 2, 3, 4])
fixtures: Mapping[Protocol, bytes] = {
Protocol.COMPACT: b"\x15\n\x19E\x02\x04\x06\x08,\x00\x00",
Protocol.BINARY: b"\x08\x00\x01\x00\x00\x00\x05\x0f\x00\x02\x08\x00\x00\x00"
b"\x04\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00"
b"\x00\x00\x04\x0c\x00\x04\x00\x00",
Protocol.JSON: b'{"val":5,"val_list":[1,2,3,4],"an_int":{}}',
Protocol.COMPACT_JSON: b'{"1":{"i32":5},"2":{"lst":["i32",4,1,2,3,4]},"4"'
b':{"rec":{}}}',
}
self.thrift_serialization_round_robin(control, fixtures)
def test_pickle_easy_struct(self) -> None:
control = easy(val=0, val_list=[5, 6, 7])
self.pickle_round_robin(control)
def test_serialize_hard_struct(self) -> None:
control = hard(
val=0, val_list=[1, 2, 3, 4], name="foo", an_int=Integers(tiny=1)
)
fixtures: Mapping[Protocol, bytes] = {
Protocol.COMPACT: b"\x15\x00\x19E\x02\x04\x06\x08\x18\x03foo\x1c\x13\x01"
b"\x00\x18\x0csome default\x00",
Protocol.BINARY: b"\x08\x00\x01\x00\x00\x00\x00\x0f\x00\x02\x08\x00\x00\x00"
b"\x04\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00"
b"\x00\x00\x04\x0b\x00\x03\x00\x00\x00\x03foo\x0c\x00\x04"
b"\x03\x00\x01\x01\x00\x0b\x00\x05\x00\x00\x00\x0csome def"
b"ault\x00",
Protocol.JSON: b'{"val":0,"val_list":[1,2,3,4],"name":"foo","an_int":{"tiny'
b'":1},"other":"some default"}',
Protocol.COMPACT_JSON: b'{"1":{"i32":0},"2":{"lst":["i32",4,1,2,3,4]},"3":'
b'{"str":"foo"},"4":{"rec":{"1":{"i8":1}}},"5":{"str":"some default"}}',
}
self.thrift_serialization_round_robin(control, fixtures)
def test_pickle_hard_struct(self) -> None:
control = hard(
val=0, val_list=[1, 2, 3, 4], name="foo", an_int=Integers(tiny=1)
)
self.pickle_round_robin(control)
def test_serialize_Integers_union(self) -> None:
control = Integers(medium=1337)
fixtures: Mapping[Protocol, bytes] = {
Protocol.COMPACT: b"5\xf2\x14\x00",
Protocol.BINARY: b"\x08\x00\x03\x00\x00\x059\x00",
Protocol.JSON: b'{"medium":1337}',
Protocol.COMPACT_JSON: b'{"3":{"i32":1337}}',
}
self.thrift_serialization_round_robin(control, fixtures)
def test_pickle_Integers_union(self) -> None:
control = Integers(large=2 ** 32)
self.pickle_round_robin(control)
def test_pickle_sequence(self) -> None:
control = I32List([1, 2, 3, 4])
self.pickle_round_robin(control)
digits = Digits(data=[Integers(tiny=1), Integers(tiny=2), Integers(large=0)])
data = digits.data
assert data
self.pickle_round_robin(data)
def test_pickle_set(self) -> None:
control = SetI32({1, 2, 3, 4})
self.pickle_round_robin(control)
def test_pickle_mapping(self) -> None:
control = StrStrMap({"test": "test", "foo": "bar"})
self.pickle_round_robin(control)
def test_deserialize_with_length(self) -> None:
control = easy(val=5, val_list=[1, 2, 3, 4, 5])
for proto in Protocol:
encoded = serialize(control, protocol=proto)
decoded, length = deserialize_with_length(
type(control), encoded, protocol=proto
)
self.assertIsInstance(decoded, type(control))
self.assertEqual(decoded, control)
self.assertEqual(length, len(encoded))
def test_string_with_non_utf8_data(self) -> None:
encoded = b"\x0b\x00\x01\x00\x00\x00\x03foo\x00"
sb = deserialize(StringBucket, encoded, protocol=Protocol.BINARY)
self.assertEqual("foo", sb.one)
encoded = b"\x0b\x00\x01\x00\x00\x00\x03\xfa\xf0\xef\x00"
sb = deserialize(StringBucket, encoded, protocol=Protocol.BINARY)
with self.assertRaises(UnicodeDecodeError):
# Accessing the property is when the string is decoded as UTF-8.
sb.one
|
WeilerWebServices/Facebook
|
fbthrift/thrift/lib/py3/test/serializer.py
|
serializer.py
|
py
| 8,534 |
python
|
en
|
code
| 3 |
github-code
|
6
|
8344742022
|
import numpy as np
def Sigmoid(z):
    h = 1/(1+np.exp(-z))
    return h
def gradientDescent(x, y, theta, alpha, num_iter):
m = x.shape[0]
for i in range(0, num_iter):
z = np.dot(x, theta)
h = Sigmoid(z)
J = (-1/m)*((np.dot(y.T, np.log(h))) + (np.dot((1-y).T, np.log(1-h))))
theta = theta - (alpha/m) * (np.dot(x.T, (h-y)))
J = float(J)
return J, theta
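# A minimal smoke test for Sigmoid/gradientDescent on synthetic data
# (hypothetical; x gets a bias column, so theta carries three weights):
if __name__ == "__main__":
    np.random.seed(0)
    x = np.hstack([np.ones((100, 1)), np.random.randn(100, 2)])
    y = (x[:, 1:2] + x[:, 2:3] > 0).astype(float)
    theta = np.zeros((3, 1))
    J, theta = gradientDescent(x, y, theta, alpha=0.1, num_iter=500)
    print(J)  # should end well below log(2) ~ 0.693 on this separable toy set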
|
Narayan-21/NLP-Specialization
|
Sentiment Analysis using logistic regression/utils.py
|
utils.py
|
py
| 404 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32259974513
|
### SPDX-License-Identifier: GPL-2.0-or-later
"""Parse phc2sys log messages"""
import re
from collections import namedtuple
from .parser import (Parser, parse_decimal)
class TimeErrorParser(Parser):
"""Parse time error from a phc2sys log message"""
id_ = 'phc2sys/time-error'
elems = ('timestamp', 'terror', 'state', 'delay')
y_name = 'terror'
parsed = namedtuple('Parsed', elems)
@staticmethod
def build_regexp():
"""Return a regular expression string for parsing phc2sys log file lines"""
return r'\s'.join((r'^phc2sys'
+ r'\[([1-9][0-9]*\.[0-9]{3})\]:' # timestamp
+ r'(?:\s\[ptp4l\.\d\..*\])?', # configuration file name
r'CLOCK_REALTIME phc offset\s*',
r'(-?[0-9]+)', # time error
r'(\S+)', # state
r'freq\s*',
r'([-+]?[0-9]+)', # frequency error
r'delay\s*',
r'(-?[0-9]+)' # delay
+ r'\s*.*$'))
def __init__(self):
super().__init__()
self._regexp = re.compile(self.build_regexp())
def make_parsed(self, elems):
if len(elems) < len(self.elems):
raise ValueError(elems)
timestamp = parse_decimal(elems[0])
terror = int(elems[1])
state = str(elems[2])
delay = int(elems[3])
return self.parsed(timestamp, terror, state, delay)
def parse_line(self, line):
matched = self._regexp.match(line)
if matched:
return self.make_parsed((
matched.group(1),
matched.group(2),
matched.group(3),
matched.group(5),
))
return None
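# Hypothetical usage sketch (the import path is assumed from the repo layout;
# the sample line mirrors the format the regexp above expects):
#
#   from vse_sync_pp.parsers.phc2sys import TimeErrorParser
#   parser = TimeErrorParser()
#   line = 'phc2sys[1234.000]: CLOCK_REALTIME phc offset -42 s2 freq +1024 delay 715'
#   parser.parse_line(line)
#   # -> Parsed(timestamp=1234.000, terror=-42, state='s2', delay=715)
#   # (the timestamp's exact type comes from parse_decimal)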
|
redhat-partner-solutions/vse-sync-pp
|
src/vse_sync_pp/parsers/phc2sys.py
|
phc2sys.py
|
py
| 1,846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4159403258
|
import math
import random
import vector3
from rtweekend import random_double
import multiprocessing
from multiprocessing import Process, Array
from ctypes import c_char_p
from color import write_color
from vector3 import vec3, random_in_hemisphere
from ray import ray
import rtweekend
from hittable import hit_record
from hittable_list import hit_ls
from sphere import sphere
from camera import camera
from material import lambertian, metal, dielectric
#image width
def multi_render(return_string, id, fromI, toI, image_height, image_width ,samples_per_pixel, cam, world, max_depth):
    for j in range(toI - 1, fromI - 1, -1):  # render rows [fromI, toI), top row first
for i in range(0,image_width):
pixel_color = vec3(0,0,0)
for _ in range(samples_per_pixel):
u = (i + random.random()) / (image_width-1)
v = (j + random.random()) / (image_height-1)
r = cam.get_ray(u, v)
pixel_color = pixel_color + ray_color(r, world, max_depth)
return_string[id] += write_color(pixel_color, samples_per_pixel)
def random_scene():
world = []
ground_material = lambertian(vec3(0.5, 0.5, 0.5))
world.append(sphere(vec3(0,-1000,0), 1000, ground_material))
for a in range(-11,11,1):
for b in range(-11,11,1):
choose_mat = random.random()
center = vec3(a + 0.9*random.random(), 0.2, b + 0.9*random.random())
if((center - vec3(4, 0.2, 0)).length() > 0.9):
if (choose_mat < 0.8):
#diffuse
albedo = vector3.random().mult( vector3.random())
sphere_material = lambertian(albedo)
world.append(sphere(center, 0.2, sphere_material))
elif (choose_mat < 0.95):
#metal
albedo = vector3.random(0.5, 1)
fuzz = random_double(0, 0.5)
sphere_material = metal(albedo, fuzz)
world.append(sphere(center, 0.2, sphere_material))
else:
#glass
sphere_material = dielectric(1.5)
world.append(sphere(center, 0.2, sphere_material))
material1 = dielectric(1.5)
world.append(sphere(vec3(0, 1, 0), 1.0, material1))
material2 = lambertian(vec3(0.4, 0.2, 0.1))
world.append(sphere(vec3(-4, 1, 0), 1.0, material2))
material3 = metal(vec3(0.7, 0.6, 0.5), 0.0)
world.append(sphere(vec3(4, 1, 0), 1.0, material3))
return world
def ray_color(r, world, depth):
rec = hit_record(vec3(0,0,0), vec3(0,0,0), None, 0.0, False)
if depth <= 0:
return vec3(0,0,0)
hit_anything, rec = hit_ls(world, r, 0.001, rtweekend.infinity, rec)
if hit_anything:
scat, scattered, attenuation = rec.mat_ptr.scatter(r,rec)
if scat:
return ray_color(scattered, world,depth-1).mult(attenuation)
return vec3(0,0,0)
unit_direction = r.get_direction().unit_vector()
t = 0.5 * (unit_direction.y() + 1.0)
return vec3(1,1,1)*(1-t) + vec3(0.5,0.7,1.0)*t
if __name__ == '__main__':
#Image
aspect_ratio = 3.0 / 2.0
image_width = 384 # optimised size for an 8-core CPU
image_height = int(image_width / aspect_ratio)
samples_per_pixel = 50
max_depth = 50
#World
world = random_scene()
# camera
lookfrom = vec3(13,2,3)
lookat = vec3(0,0,0)
vup = vec3(0,1,0)
dist_to_focus = 10.0
aperture = 0.1
cam = camera(lookfrom, lookat, vup, 20, aspect_ratio, aperture, dist_to_focus)
# render
result_string = ""
result_string += "P3 \n" + str(image_width) + ' ' + str(image_height) + "\n255\n"
number_of_cores = multiprocessing.cpu_count()
process = []
manager = multiprocessing.Manager()
return_str = manager.dict()
for i in range(number_of_cores):
return_str[i] = ''
process.append(Process(target = multi_render, args=(return_str,i,int(i*image_height/number_of_cores), int((i+1)*image_height/number_of_cores), image_height, image_width,samples_per_pixel, cam, world, max_depth),))
process[i].start()
for i in range(number_of_cores):
process[i].join()
for i in range(number_of_cores-1, -1, -1):
result_string += return_str[i]
with open('image.ppm', 'w') as f:
f.write(result_string)
f.close()
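    # Sanity check that the per-worker row ranges tile the image exactly once
    # (same bounds as multi_render above):
    rows = []
    for i in range(number_of_cores):
        lo = int(i * image_height / number_of_cores)
        hi = int((i + 1) * image_height / number_of_cores)
        rows.extend(range(hi - 1, lo - 1, -1))
    assert sorted(rows) == list(range(image_height))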
|
mk2510/ray_tracing_project
|
raytracing_in_a_weekend/main.py
|
main.py
|
py
| 4,391 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75275385466
|
from datetime import datetime
from time import process_time
# file = open(address, mode)
# with open('oi.txt', 'r', encoding='utf-8') as file:
# content = file.read()
# print(content)
with open('log.txt', 'w', encoding='utf-8') as file:
    file.write('Employee log times')
# with open('log.txt', 'r', encoding='utf-8') as file:
# content = file.read()
# print(content)
status = False
tempo_trabalhado = 0
answer = input('Do you want to log in to the system? ').lower()
if answer == 'yes':
status = True
t1 = process_time()
    name = input('Enter your name: ').upper()
with open('log.txt', 'a', encoding='utf-8') as file:
date_now = datetime.now()
log = date_now.strftime('%d-%m-%Y %H:%M:%S')
        file.write(f'\n{name} logged in {log}')
if status:
    answer = input('Do you want to log out of the system? ').lower()
    if answer == 'yes':
status = False
t2 = process_time()
tempo_trabalhado += (t2-t1)
with open('log.txt', 'a', encoding='utf-8') as file:
date_now = datetime.now()
log = date_now.strftime('%d-%m-%Y %H:%M:%S')
            file.write(f'\n{name} logged out {log}')
with open('log.txt', 'r', encoding='utf-8') as file:
content = file.read()
print(content)
print(tempo_trabalhado)
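# For reference, the '%d-%m-%Y %H:%M:%S' format used above renders like:
# datetime(2023, 5, 1, 9, 30, 0).strftime('%d-%m-%Y %H:%M:%S') -> '01-05-2023 09:30:00'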
|
ewertonpereira/python
|
test/testing.py
|
testing.py
|
py
| 1,302 |
python
|
en
|
code
| 2 |
github-code
|
6
|
7305140298
|
#!/usr/bin/python3
#author:@al_vyn
#written: 25/06/2018
import csv
#Create dictionaries
lloyd = {
"name": "Lloyd",
"homework": [90.0, 97.0, 75.0, 92.0],
"quizzes": [88.0, 40.0, 94.0],
"tests": [75.0, 90.0]
}
alice = {
"name": "Alice",
"homework": [100.0, 92.0, 98.0, 100.0],
"quizzes": [82.0, 83.0, 91.0],
"tests": [89.0, 97.0]
}
tyler = {
"name": "Tyler",
"homework": [0.0, 87.0, 75.0, 22.0],
"quizzes": [0.0, 75.0, 78.0],
"tests": [100.0, 100.0]
}
alvyn = {
"name":"alvyn",
"homework": [92.0,98.0,92.0,94.0],
"quizzes": [90.0,80,0,85.0],
"tests": [95.0,93.0]
}
students = [alvyn,tyler,lloyd,alice]
#Adding functions
def banner(text, ch='=', length=78):
spaced_text = '%s' % text
banner = spaced_text.center(length, ch)
return banner
def average(numbers):
total = sum(numbers)
total = float(total)
result = total/len(numbers)
return result
def get_average(students):
homework = average(students["homework"])
quizzes = average(students["quizzes"])
tests = average(students["tests"])
var = 0.1*homework + 0.3*quizzes + 0.6*tests
return var
def get_letter_grade(score):
if score >= 90:
return "A"
elif 80 <= score < 90:
return "B"
elif 70 <= score < 80:
return "C"
elif 60 <= score < 70:
return "D"
else:
return "F"
def get_class_average(students):
results = []
for student in students:
avg = get_average(student)
results.append(avg)
return average(results)
#alvyn's average data
alvyn_hw = average(alvyn["homework"])
alvyn_qz = average(alvyn["quizzes"])
alvyn_ts = average(alvyn["tests"])
#alice's average data
alice_hw = average(alice["homework"])
alice_qz = average(alice["quizzes"])
alice_ts = average(alice["tests"])
#tyler's average data
tyler_hw = average(tyler["homework"])
tyler_qz = average(tyler["quizzes"])
tyler_ts = average(tyler["tests"])
#lloyd's average data
lloyd_hw = average(lloyd["homework"])
lloyd_qz = average(lloyd["quizzes"])
lloyd_ts = average(lloyd["tests"])
#write the results to a csv file
print (banner('GradeBook'))
print ("\n")
print ("[+]-->A python script that calculates averages and writes the data to a csv file")
with open('C:\\Users\\User\\Documents\\project\\results.csv', 'w') as csvfile:
fieldnames = ['S/N', 'NAME', 'HOMEWORK', 'QUIZZES', 'TESTS', 'REMARKS']
writer = csv.DictWriter(csvfile, fieldnames = fieldnames)
writer.writeheader()
writer.writerow({'S/N':1, 'NAME':'Alvyn', 'HOMEWORK': alvyn_hw, 'QUIZZES': alvyn_qz, 'TESTS': alvyn_ts, 'REMARKS':""})
writer.writerow({'S/N':2, 'NAME':'Alice', 'HOMEWORK': alice_hw, 'QUIZZES': alice_qz, 'TESTS': alice_ts, 'REMARKS':""})
writer.writerow({'S/N':3, 'NAME':'Lloyd', 'HOMEWORK': lloyd_hw, 'QUIZZES': lloyd_qz, 'TESTS': lloyd_ts, 'REMARKS':""})
writer.writerow({'S/N':4, 'NAME':'Tyler', 'HOMEWORK': tyler_hw, 'QUIZZES': tyler_qz, 'TESTS': tyler_ts, 'REMARKS':""})
#read and display the results csv file
with open('C:\\Users\\User\\Documents\\project\\results.csv', 'r') as csvfile:
    data = csv.reader(csvfile, delimiter = ',', quotechar = '|')
for row in data:
print (','.join(row))
with open('C:\\Users\\User\\Documents\\project\\results.csv', 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
print (row['HOMEWORK'], row['QUIZZES'], row['TESTS'], row['REMARKS'])
print ("\n")
print (banner('cha0s'))
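# Quick arithmetic check of the 10/30/60 weighting used in get_average()
# (hypothetical scores):
check = 0.1 * 90 + 0.3 * 80 + 0.6 * 70
assert abs(check - 75.0) < 1e-9
assert get_letter_grade(check) == "C"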
|
alvyn96/GradeBook
|
gradebook.py
|
gradebook.py
|
py
| 3,471 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27315049620
|
import facebook
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import random
token='EAACEdEose0cBAHYBMbXyW9HwyJJIeCFBaWXEcLjsp3N0vB5HZApZCxqm7KQvVxb4fgF2ZA8nh625ZBJR3NzCMGc3ApU1MyZCYBwVF85LWxqdaEdt3cNVaS0y9CYsY4DDUjGcUeDZB0TMZBJwqdEBCZBClU00PeeMqnWmMpZCWCUFGmp12hZBZA3mLilYc450f4cWvkZD'
graph=facebook.GraphAPI(token)
profile=graph.get_object("me")
posts = graph.get_connections(profile['id'], 'posts')
messages=[]
for post in posts['data']:
try:
messages.append(post['message'])
    except KeyError:
continue
wordlist=[]
wordfr=[]
s=" "
for m in messages:
words=m.split()
for w in words:
s=s+" "+w
wordlist.append(w)
print(w)
for w in wordlist:
wordfr.append(wordlist.count(w))
print("List\n" + str(wordlist) + "\n")
print("Frequencies\n" + str(wordfr) + "\n")
print("Pairs\n" + str(zip(wordlist, wordfr)))
wordcloud = WordCloud(relative_scaling = 1.0, stopwords = STOPWORDS.union({'to', 'of'})).generate(s)
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
|
aparnamnn/ACM-Project
|
wordclouds.py
|
wordclouds.py
|
py
| 1,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8947958268
|
from django.db import models
import ast
class ListField(models.TextField):
__metaclass__ = models.SubfieldBase
description = "Stores a python list"
def __init__(self, *args, **kwargs):
super(ListField, self).__init__(*args, **kwargs)
def to_python(self, value):
if not value:
value = []
if isinstance(value, list):
return value
return ast.literal_eval(value)
def get_prep_value(self, value):
if value is None:
return value
return unicode(value)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
class Student(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
netID = models.CharField(max_length=8, unique=True)
# blocks = ListField('Busy blocks',blank=True)
def blocks(self):
blks = []
for course in self.course_set.all():
for blk in course.blocks:
blks.append(blk)
return blks
def __unicode__(self): # Python 3: def __str__(self):
return self.netID
class Instructor(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
netID = models.CharField(max_length=8, unique=True)
faculty = models.BooleanField(default=False)
def __unicode__(self): # Python 3: def __str__(self):
return self.netID
def billable(self):
if self.faculty:
return 0
my_courses = self.course_set.all()
hours = 0.0
for course in my_courses:
hours += len(course.blocks)/2.0
return hours
def full_name(self):
return self.first_name+" "+self.last_name
class Course(models.Model):
courseID = models.CharField(max_length=20, unique=True)
title = models.CharField(max_length=75,default='Title needed')
description = models.TextField(max_length=1000)
other_section = models.ManyToManyField('self', blank=True)
min_enroll = models.IntegerField(default=0)
max_enroll = models.IntegerField(default=200)
cancelled = models.BooleanField(default=False)
room = models.CharField(max_length=200,default='tbd')
blocks = ListField('Course blocks')
schedule = models.CharField(max_length=50)
students = models.ManyToManyField(Student, through='Registration')
instructors = models.ManyToManyField(Instructor)
def __unicode__(self): # Python 3: def __str__(self):
return self.courseID
def current_enroll(self):
return len(self.students.all())
def is_full(self):
num_enroll = self.current_enroll()
return num_enroll >= self.max_enroll
def meets_min_requirements(self):
num_enroll = self.current_enroll()
return num_enroll >= self.min_enroll
def get_instructors(self):
return ", ".join([i.full_name() for i in self.instructors.all()])
is_full.boolean = True
meets_min_requirements.boolean = True
class Registration(models.Model):
student = models.ForeignKey(Student)
course = models.ForeignKey(Course)
timestamp = models.DateTimeField('Registration timestamp',auto_now_add=True)
attendance_M = models.BooleanField(default=False)
attendance_Tu = models.BooleanField(default=False)
attendance_W = models.BooleanField(default=False)
attendance_Th = models.BooleanField(default=False)
attendance_F = models.BooleanField(default=False)
def __unicode__(self): # Python 3: def __str__(self):
return self.student.netID+"-"+self.course.courseID
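# Standalone round-trip of the ListField conversion logic (no Django needed):
#
#   import ast
#   stored = "[1, 2, 'a']"            # text persisted in the TextField
#   value = ast.literal_eval(stored)  # to_python()
#   assert value == [1, 2, 'a']
#   assert str(value) == stored       # get_prep_value() (unicode() on Python 2)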
|
epkugelmass/USG-srv-dev
|
tigerapps/wintersession/models.py
|
models.py
|
py
| 3,678 |
python
|
en
|
code
| null |
github-code
|
6
|
39795452637
|
# coding=utf-8
import requests
import re
import execjs
import json
from bs4 import BeautifulSoup
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
sendAddress = ''
emailPsw = ''
receiveAddress = ''
username = ''
psw = ''
def loadConfig():
with open('config.json', 'r', encoding='utf-8') as f:
config = f.read()
configJson = json.loads(config)
print(configJson)
global sendAddress, emailPsw, receiveAddress, username, psw
sendAddress = configJson['sendAddress']
emailPsw = configJson['emailPsw']
receiveAddress = configJson['receiveAddress']
username = configJson['username']
psw = configJson['psw']
def sendEmail(msgJson):
    try:
        stuName = msgJson['data']['owner']['name']
        info = stuName + '\nCheck-in succeeded'
    except (KeyError, TypeError):
        info = 'Check-in failed\nDetails: ' + str(msgJson)
    msg = MIMEText(info, 'plain', 'utf-8')  # email body
    msg['From'] = formataddr(["XMU Health Check-in", sendAddress])  # sender display name and address
    msg['To'] = formataddr([receiveAddress, receiveAddress])  # recipient display name and address
    msg['Subject'] = "XMU Health Check-in"  # subject line
    server = smtplib.SMTP_SSL("smtp.qq.com", 465)  # SMTP server of the sending mailbox
    server.login(sendAddress, emailPsw)  # sender address and mailbox authorization code
    server.sendmail(sendAddress, [receiveAddress, ], msg.as_string())  # sender, recipients, message
    server.quit()  # close the connection
def encrypt(pwd, key):
"""
    Call the JS encryption function
:param pwd:
:param key:
:return:
"""
with open('encrypt.js', 'r', encoding='utf-8') as f:
j = f.read()
js = execjs.compile(j)
return js.call('encryptAES', pwd, key)
def getDataFrame(session, headers, businessId):
"""
    Build the empty post-JSON frame for the check-in form
:param session:
:param headers:
:param businessId:
    :return: the form frame
"""
authorityMap = {'readonly': {'hide': 'true', "readonly": 'false'}, 'hide': {"hide": 'true', "readonly": 'false'},
'required': {'hide': 'true', "readonly": 'false'},
'optional': {'hide': 'true', "readonly": 'false'}}
list = []
dataFrameJson = session.get(
'https://xmuxg.xmu.edu.cn/api/formEngine/business/' + str(businessId) + '/formRenderData?playerId=owner',
headers=headers).json()['data']['components']
for data in dataFrameJson:
tempDict = {}
tempDict.update({"name": data['name']})
tempDict.update({"title": data['title']})
tempDict.update({'value': {}})
tempDict.update(authorityMap[data['properties']['authority']])
list.append(tempDict)
return {"formData": list, "playerId": "owner"}
def injectPersonalData(formDataJson, personalDataList):
"""
ๅฐไธชไบบไฟกๆฏๆณจๅ
ฅๅฐformDataๅ
ๅนถไฟฎๆนไธบๅทฒๆๅก
:param formDataJson:
:param personalDataList:
:return: ๆณจๅ
ฅๅผ็ๆกๆถ
"""
    dataMap = {}  # build a title -> value mapping
for personalData in personalDataList:
valueData = {}
        # address field
if (personalData['value']['dataType'] == "ADDRESS_VALUE"):
valueData.update({'addressValue': personalData['value']['addressValue']})
        # plain string field
elif (personalData['value']['dataType'] == "STRING"):
valueData.update({'stringValue': personalData['value']['stringValue']})
        # date field
elif (personalData['value']['dataType'] == "DATE"):
valueData.update({'dateValue': personalData['value']['dateValue']})
dataMap.update({personalData['title']: valueData})
    # mark the form as submitted; both titles must match the server-side form text exactly
    title1 = 'Can you hereby declare that all the information provided is all true and accurate and there is no concealment, false information or omission. 本人是否承诺所填报的全部内容均属实、准确，不存在任何隐瞒和不实的情况，更无遗漏之处。'
    dataMap[title1]['stringValue'] = "是 Yes"
    title2 = '学生本人是否填写'
    dataMap[title2]['stringValue'] = '是'
    # inject the mapped values back into the form's field list
list = formDataJson['formData']
for i in range(0, list.__len__()):
        # if we have a value for this field, inject it
if (dataMap.__contains__(list[i]['title'])):
list[i]['value'] = dataMap[list[i]['title']]
return {"formData": list, "playerId": "owner"}
if __name__ == '__main__':
    # load the config
    loadConfig()
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
}
s = requests.session()
response = s.get('https://ids.xmu.edu.cn/authserver/login?service=https://xmuxg.xmu.edu.cn/login/cas/xmu')
HTML = BeautifulSoup(response.text, 'html.parser')
pwdDefaultEncryptSalt = HTML.find_all('input', attrs={'id': 'pwdDefaultEncryptSalt'})[0].attrs['value']
lt = HTML.find_all('input', attrs={'name': 'lt'})[0].attrs['value']
dllt = HTML.find_all('input', attrs={'name': 'dllt'})[0].attrs['value']
execution = HTML.find_all('input', attrs={'name': 'execution'})[0].attrs['value']
_eventId = HTML.find_all('input', attrs={'name': '_eventId'})[0].attrs['value']
rmShown = HTML.find_all('input', attrs={'name': 'rmShown'})[0].attrs['value']
encryptPsw = encrypt(psw, pwdDefaultEncryptSalt)
body = {'username': username,
'password': encryptPsw,
'lt': lt,
'dllt': dllt,
'execution': execution,
'_eventId': _eventId,
'rmShown': rmShown}
s.post('https://ids.xmu.edu.cn/authserver/login?service=https://xmuxg.xmu.edu.cn/login/cas/xmu', data=body,
headers=headers)
r1 = s.get('https://xmuxg.xmu.edu.cn/api/app/214/business/now?getFirst=true', headers=headers)
print(r1.text)
    businessId = r1.json()['data'][0]['business']['id']
    # fetch the form frame
    formDataJson = getDataFrame(s, headers, businessId)
    # fetch personal info
r2Json = s.get(
'https://xmuxg.xmu.edu.cn/api/formEngine/business/' + str(businessId) + '/myFormInstance').json()
    # inject personal info
    formData = injectPersonalData(formDataJson, r2Json['data']['formData'])
    # URL for the check-in POST
    instanceId = r2Json['data']['id']
    form_url = 'https://xmuxg.xmu.edu.cn/api/formEngine/formInstance/' + instanceId
    # submit the check-in
resp = s.post(form_url, json=formData, headers=headers)
sendEmail(resp.json())
|
mawangdan/XMUDaliyReport
|
src/main.py
|
main.py
|
py
| 6,736 |
python
|
en
|
code
| 15 |
github-code
|
6
|
8502338250
|
import torch
import torch.nn as nn
from Descriptor import Descriptor
from Recovery_Submodule import R_t, Pyramid_maxout
class TR(nn.Module):
# translucency recovery(TR) module
def __init__(self, input_channel=3, beta=4, gamma=4):
super(TR, self).__init__()
self.D_t = Descriptor(input_channel, gamma)
self.R_t = R_t(385, beta)
def forward(self, x, **kwargs):
f_t = self.D_t(x)
y_, f_c, z_hat, a = self.R_t(x, f_t, **kwargs)
return y_, f_c, z_hat, a
class TR_new(nn.Module):
# A new translucency recovery(TR) module with two descriptors
def __init__(self, input_channel=3, beta=4, gamma=4):
super(TR_new, self).__init__()
self.D_t_1 = Descriptor(input_channel, gamma)
self.D_t_2 = Descriptor(input_channel, gamma)
self.SE = Pyramid_maxout(385, 1, beta)
self.AE = Pyramid_maxout(385, 3, beta)
def forward(self, x, **kwargs):
f_t_1 = self.D_t_1(x)
z_hat = self.SE(f_t_1)
z_hat[z_hat >= 1] = 1
z_hat[z_hat <= 0] = 0
z_hat_ = z_hat.detach()
f_t_2 = self.D_t_2(x)
a = self.AE(f_t_2)
# yield estimated snow-free image y'
y_ = (z_hat_ < 1) * (x - a * z_hat_) / (1 - z_hat_ + 1e-8) + (z_hat_ == 1) * x
y_[y_ >= 1] = 1
y_[y_ <= 0] = 0
# yield feature map f_c
f_c = torch.cat([y_, z_hat_, a], dim=1)
return y_, f_c, z_hat, a
class TR_za(nn.Module):
# A translucency recovery(TR) module predict z\times a
def __init__(self, input_channel=3, beta=4, gamma=4):
super(TR_za, self).__init__()
self.D_t = Descriptor(input_channel, gamma)
self.SE = Pyramid_maxout(385, 1, beta)
self.SAE = Pyramid_maxout(385, 3, beta)
def forward(self, x, **kwargs):
f_t = self.D_t(x)
z_hat = self.SE(f_t)
za = self.SAE(f_t)
z_hat[z_hat >= 1] = 1
z_hat[z_hat <= 0] = 0
za[za >= 1] = 1
za[za <= 0] = 0
# yield estimated snow-free image y'
y_ = (z_hat < 1) * (x - za) / (1 - z_hat + 1e-8) + (z_hat == 1) * x
y_[y_ >= 1] = 1
y_[y_ <= 0] = 0
# yield feature map f_c
f_c = torch.cat([y_, z_hat, za], dim=1)
return y_, f_c, z_hat, za
class RG(nn.Module):
# the residual generation (RG) module
def __init__(self, input_channel=7, beta=4, gamma=4):
super(RG, self).__init__()
self.D_r = Descriptor(input_channel, gamma)
block = []
for i in range(beta):
block.append(nn.Conv2d(385, 3, 2 * i + 1, 1, padding=i))
self.conv_module = nn.ModuleList(block)
self.activation = nn.Tanh()
def forward(self, f_c):
f_r = self.D_r(f_c)
        for i, module in enumerate(self.conv_module):
            if i == 0:
                r = module(f_r)
            else:
                r = r + module(f_r)  # accumulate the multi-scale conv outputs
r = self.activation(r)
return r
class DesnowNet(nn.Module):
# the DesnowNet
def __init__(self, input_channel=3, beta=4, gamma=4, mode='original'):
super(DesnowNet, self).__init__()
if mode == 'original':
self.TR = TR(input_channel, beta, gamma)
elif mode == 'new_descriptor':
self.TR = TR_new(input_channel, beta, gamma)
elif mode == 'za':
self.TR = TR_za(input_channel, beta, gamma)
else:
raise ValueError("Invalid architectural mode")
self.RG = RG(beta=beta, gamma=gamma)
def forward(self, x, **kwargs):
y_, f_c, z_hat, a = self.TR(x, **kwargs)
r = self.RG(f_c)
y_hat = r + y_
return y_hat, y_, z_hat, a
if __name__ == '__main__':
device = 'cuda'
net = DesnowNet().to(device)
mask = torch.zeros([2, 1, 64, 64]).to(device)
img = torch.zeros([2, 3, 64, 64]).to(device)
y_hat, y_, z_hat, a = net(img, mask=mask)
y_hat.mean().backward()
print("finished")
|
linYDTHU/DesnowNet_Context-Aware_Deep_Network_for_Snow_Removal
|
network/DesnowNet.py
|
DesnowNet.py
|
py
| 3,956 |
python
|
en
|
code
| 15 |
github-code
|
6
|
25414336083
|
"""
2.12 Vison Local Server Test
MIT 2.12 Intro To Robotics 2014
Daniel J. Gonzalez - [email protected]
"""
serverIP = 'localhost' #Use if loopback testing on your own computer
#serverIP = '192.168.1.212' #Use if this code is running over a 2.12 Server
################# DO NOT EDIT ANYTHING BELOW #########################
import threading,SocketServer,time
import signal
import sys
import struct
tStart = time.time()
timestamp = tStart
n = 0
def signal_handler(signal, frame):
print('Closing...')
server.socket.close()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
class requestHandler(SocketServer.StreamRequestHandler):
def handle(self):
requestForUpdate=self.request.recv(256)
print(self.client_address)
while requestForUpdate!='':
state = [1000, 2000, 3000, 4000, 5000, 6000]
data1 = ''.join([struct.pack('>H',x) for x in state])
data2 = struct.pack('>f',timestamp)
self.wfile.write(data1+data2)
requestForUpdate=self.request.recv(256)
print('client disconnect')
class broadcastServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
if __name__ == '__main__':
server=broadcastServer((serverIP,2121),requestHandler)
t = threading.Thread(target=server.serve_forever)
t.daemon=True
t.start()
print('server start')
n=0
while True:
timestamp = time.time() - tStart
n+=1
time.sleep(.001)
server.socket.close()
|
skyleradams/tim-howard
|
Vision/testServer.py
|
testServer.py
|
py
| 1,577 |
python
|
en
|
code
| 5 |
github-code
|
6
|
7422093495
|
from operator import add
from itertools import chain, combinations
from functools import reduce
import math
import numpy as np
from scipy import ndimage
from tkinter import *
class GF2(object):
def __init__(self, a=0):
self.value = int(a) & 1
def __add__(self, rhs):
return GF2(self.value + GF2(rhs).value)
def __mul__(self, rhs):
return GF2(self.value * GF2(rhs).value)
def __sub__(self, rhs):
return GF2(self.value - GF2(rhs).value)
def __truediv__(self, rhs):
return GF2(self.value / GF2(rhs).value)
def __repr__(self):
return str(self.value)
def __eq__(self, rhs):
if isinstance(rhs, GF2):
return self.value == rhs.value
return self.value == rhs
def __le__(self, rhs):
if isinstance(rhs, GF2):
return self.value <= rhs.value
return self.value <= rhs
def __lt__(self, rhs):
if isinstance(rhs, GF2):
return self.value < rhs.value
return self.value < rhs
def __int__(self):
return self.value
def __long__(self):
return self.value
GF2array = np.vectorize(GF2)
def gjel(A):
nulldim = 0
for i, row1 in enumerate(A):
pivot = A[i:, i].argmax() + i
if A[pivot, i] == 0:
nulldim = len(A) - i
break
new_row = A[pivot] / A[pivot, i]
A[pivot] = A[i]
row1[:] = new_row
for j, row2 in enumerate(A):
if j == i:
continue
row2[:] -= new_row*A[j, i]
return A, nulldim
def GF2inv(A):
n = len(A)
assert n == A.shape[1], "Matrix must be square"
A = np.hstack([A, np.eye(n)])
B, nulldim = gjel(GF2array(A))
inverse = np.int_(B[-n:, -n:])
E = B[:n, :n]
null_vectors = []
if nulldim > 0:
null_vectors = E[:, -nulldim:]
null_vectors[-nulldim:, :] = GF2array(np.eye(nulldim))
null_vectors = np.int_(null_vectors.T)
return inverse, null_vectors
def lightsoutbase(n):
a = np.eye(n*n)
a = np.reshape(a, (n*n, n, n))
a = np.array(list(map(ndimage.binary_dilation, a)))
return np.reshape(a, (n*n, n*n))
def powerset(iterable):
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
class LightsOut(object):
def __init__(self, size=5):
self.n = size
self.base = lightsoutbase(self.n)
self.invbase, self.null_vectors = GF2inv(self.base)
def solve(self, b):
b = np.asarray(b)
assert b.shape[0] == b.shape[1] == self.n, "incompatible shape"
if not self.issolvable(b):
raise ValueError("The given setup is not solvable")
first = np.dot(self.invbase, b.ravel()) & 1
solutions = [(first + reduce(add, nvs, 0)) & 1 for nvs in powerset(self.null_vectors)]
final = min(solutions, key=lambda x: x.sum())
return np.reshape(final, (self.n, self.n))
def issolvable(self, b):
b = np.asarray(b)
assert b.shape[0] == b.shape[1] == self.n, "incompatible shape"
b = b.ravel()
p = [np.dot(x, b) & 1 for x in self.null_vectors]
return not any(p)
def text_to_mat(gridtxt, invert=True):
gridlist = [int(s) for s in list(gridtxt)]
shape = np.sqrt(len(gridlist))
if shape%1 != 0:
print("input matrix is not square.")
return 1
shape = int(shape)
matlist = [gridlist[i: i+shape] for i in range(0, len(gridlist), shape)]
mat = np.array(matlist)
if invert:
mat = 1-mat
return mat
def mat_to_text(mat, invert=False):
s = ""
for i in mat:
for j in i:
if invert:
s += str(1-j)
else:
s += str(j)
return s
def text_solver(gridtxt):
mat_inv = text_to_mat(gridtxt, True)
if type(mat_inv) == int:
return 1
lo = LightsOut(3)
try:
bsol = lo.solve(mat_inv)
    except ValueError:  # raised when the setup is not solvable
print("Error in determining solution")
return 1
return bsol
master = Tk()
master_gridtxt = StringVar(value="000000000")
master.title("DVa's Puzzle Solver")
master.geometry("400x115")
master.resizable(width=False, height=False)
check_size = 25
check_on = PhotoImage(width=check_size, height=check_size)
check_off = PhotoImage(width=check_size, height=check_size)
check_on.put(("green"), to=(0,0,check_size,check_size))
check_off.put(("red"), to=(0,0,check_size,check_size))
label_text = StringVar()
def update_gridtxt():
b_solve['state'] = NORMAL
master_gridtxt.set("")
for i in range(9):
s = str(globals()[f"b_state{i}"].get())
master_gridtxt.set(master_gridtxt.get() + s)
def reset_boxes():
for i in range(9):
globals()[f"b_state{i}"].set(0)
label_text.set("")
b_solve['state'] = NORMAL
def final_wrapper(gridtxt):
mat = text_solver(gridtxt)
gridtxt_final = mat_to_text(mat)
reset_boxes()
for idx, i in enumerate(gridtxt_final):
if i == "1":
globals()[f"b{idx}"].select()
b_solve['state'] = DISABLED
label_text.set("Solved. Shoot the lamps marked with green boxes.")
for i in range(9):
j = i+1
col = i%3
row = math.ceil(j/3)
globals()[f"b_state{i}"] = IntVar()
globals()[f"b{i}"] = Checkbutton(master, variable=globals()[f"b_state{i}"],
image=check_off, selectimage=check_on, indicatoron=False,
onvalue=1, offvalue=0, command=update_gridtxt)
globals()[f"b{i}"].grid(row=row, column=col, padx=1, pady=1)
b_solve = Button(master, text="Solve", command=lambda:final_wrapper(master_gridtxt.get()), anchor="w")
b_solve.grid(row=1, column=4, padx=1, pady=1, sticky="w")
b_reset = Button(master, text="Reset", command=reset_boxes, anchor="w")
b_reset.grid(row=2, column=4, padx=1, pady=1, sticky="w")
lbl = Label(master, textvariable=label_text, anchor="w")
lbl.grid(row=3, column=4, padx=1, pady=1)
master.mainloop()
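# Headless use of the solver core, without the Tk UI (shown as a sketch since
# inline code here would only run after the window closes):
#
#   lo = LightsOut(3)
#   board = np.ones((3, 3), dtype=int)   # pattern to switch off
#   if lo.issolvable(board):
#       print(lo.solve(board))           # 1 = press this cell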
|
ThaumielSparrow/switch-solver
|
lights_on.py
|
lights_on.py
|
py
| 6,138 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21253145382
|
from django.shortcuts import render
from django.views.generic import View  # import View
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from wanwenyc.settings import DJANGO_SERVER_YUMING,MEDIA_ROOT
from .models import RdmAutoStatic,RdmStatic,RdmConfig
# Create your views here.
# Automatically merge and generate task names, task details and issue details from the database
def RdmAutoStaticRequest(request, rdmautostatic_id, trackback=None):
    rdmautostatic = RdmAutoStatic.objects.get(id=int(rdmautostatic_id))  # fetch the record
people_name = rdmautostatic.people_name
start_date = str(rdmautostatic.start_date)
end_date = str(rdmautostatic.end_date)
print(people_name)
print(start_date)
print(end_date)
from django.db.models import Q
    # use Q to exclude items equal to '<span style="margin-left: 19px;color: gray;">无</span>'
    mubiao_data_list = RdmStatic.objects.filter(~Q(day_task_name='[]')).\
        filter(~Q(week_task_deck='<span style="margin-left: 19px;color: gray;">无</span>')).\
        filter(people_name=people_name).filter(is_week=False).order_by('-id')  # select all daily records for this person, ordered by id descending
all_task_name_list = []
all_task_desc_list = []
all_task_quse_list = []
for mubiao_data_one in mubiao_data_list:
day_date = mubiao_data_one.day_date
new_day_date_list = []
for one_char in day_date:
if one_char in "0123456789-":
new_day_date_list.append(one_char)
new_day_date = "".join(new_day_date_list)
        # extract this entry's date
        print("Entry date: %s" % new_day_date)
        if start_date <= new_day_date and new_day_date <= end_date:
            print("Date within the time range: %s" % new_day_date)
            # aggregate the data within the time range
            # collect all the task names
day_task_name = mubiao_data_one.day_task_name
print(day_task_name)
print(type(day_task_name))
            day_task_name_list = eval(day_task_name)  # eval() converts the list-style string into a real list
print("day_task_name_list:")
print(day_task_name_list)
print(type(day_task_name_list))
for day_task_name_one in day_task_name_list:
if day_task_name_one not in all_task_name_list:
all_task_name_list.append(day_task_name_one)
            # collect all the task details
day_task_desc = mubiao_data_one.day_task_desc
if day_task_desc not in all_task_desc_list:
all_task_desc_list.append(day_task_desc)
            # collect all the issue details
day_task_quse = mubiao_data_one.day_task_quse
if day_task_quse not in all_task_quse_list:
all_task_quse_list.append(day_task_quse)
print("ๆๆไปปๅกๅ็งฐ๏ผ")
print(all_task_name_list)
print("ๆๆไปปๅก่ฏฆๆ
๏ผ")
print(all_task_desc_list)
print("ๆๆ้ฎ้ข่ฏฆๆ
๏ผ")
print(all_task_quse_list)
rdmautostatic.all_task_name = all_task_name_list
rdmautostatic.all_task_desc = all_task_desc_list
rdmautostatic.all_task_quse = all_task_quse_list
    rdmautostatic.save()  # save to the database
    print("Redirecting back to '/reportdatas/rdmautostatic/'")
    return HttpResponseRedirect('/reportdatas/rdmautostatic/')  # redirect to that page
# Automatically merge and generate task names, task details and issue details from the database
def RdmConfigRequest(request, rdmconfig_id, trackback=None):
    rdmconfig = RdmConfig.objects.get(id=int(rdmconfig_id))  # fetch the record
rdm_url = rdmconfig.rdm_url
rdm_account = rdmconfig.rdm_account
rdm_password = rdmconfig.rdm_password
recode_year = rdmconfig.recode_year
print("RDM็ฝๅ๏ผ%s" % rdm_url)
print("RDM็ปๅฝ่ดฆๅท๏ผ%s" % rdm_account)
print("RDM็ปๅฝๅฏ็ ๏ผ%s" % rdm_password)
print("RDM็ป่ฎกๆฅๅฟๅนด้๏ผ%s" % recode_year)
from .autoStaticRDMTask import WebRemoteUphild
loginurl= rdm_url
loginaccount= rdm_account
loginpassword= rdm_password
predate = recode_year
print("ๅผๅงๆง่กๅผๆญฅๅฝๆฐ")
wc = WebRemoteUphild(loginurl=loginurl,loginaccount=loginaccount,loginpassword=loginpassword,predate=predate)
wc.run()
print("ๅฝๆฐๅผๅง่ฟ่กๅฎๆๅ")
print("้ๅฎๅ่ฟๅ'/reportdatas/rdmconfig/'")
return HttpResponseRedirect('/reportdatas/rdmconfig/') #้ๅฎๅๅฐ่ฏฅ้กต้ข
|
wawj901124/shangbaogongju
|
apps/reportdatas/views.py
|
views.py
|
py
| 4,480 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75177516346
|
from . import appsettings as local_settings
from ..models import ProperName
from .lexicalsort import lexicalsort
from .utilities import json_safe, apostrophe_unmasker
from .lemmalookup import lemma_lookup
from .lightpospicker import light_pos_picker
CORE_WORDS = local_settings.CORE_WORDS
CALENDAR = local_settings.CALENDAR
INDEFINITE_ARTICLES = local_settings.INDEFINITE_ARTICLES
INFLECTIONS = local_settings.INFLECTIONS
WORDLIKE = local_settings.WORDLIKE
PROPER_NAME_BIGRAMS = local_settings.PROPER_NAME_BIGRAMS
PROPER_NAME_ENDS = local_settings.PROPER_NAME_ENDS
PROPER_NAME_STARTS = local_settings.PROPER_NAME_STARTS
PROPER_NAME_TITLES = local_settings.PROPER_NAME_TITLES
PREFIXES = local_settings.PREFIXES
CLOSING_PUNCTUATION = local_settings.CLOSING_PUNCTUATION
UNSPACED = set(('(', '[', '\u201c', '/', '-')) # no space before these
class Token(object):
lemma_cache = {}
docyear = None
docperiod = None
def __init__(self, token, sentence):
self.token_verbatim = _token_cleanup(token)
self.token = _token_adjusted(self.token_verbatim, self.docyear)
self.sentence = sentence
self.first = False # Is this the first token in its sentence?
self.last = False # Is this the last token in its sentence?
self.previous = None # The preceding token
self.next = None # The following token
self.proper_name = None # Is this a proper name?
self.newline = False # Does this token start a new line?
self.count = 0 # Occurrences of the token's lemma (gets set later)
self.skip = False
self.propername_test = None
self.wordclass = None
@classmethod
def clear_cache(cls):
cls.lemma_cache = {}
@classmethod
def set_year(cls, year):
cls.docyear = year
periods = []
for p in (1800, 1900, 2000):
periods.append((p, abs(p - year)))
periods.sort(key=lambda p: p[1])
cls.docperiod = 'f%d' % periods[0][0]
def lower(self):
return self.token.lower()
def upper(self):
return self.token.upper()
def lexical_sort(self):
return lexicalsort(self.token)
def is_capitalized(self):
"""
Return True if this token is capitalized (title-cased)
"""
if len(self.token_verbatim) > 1 and self.token_verbatim.istitle():
return True
elif self.token_verbatim.startswith('Mc'):
return True
else:
return False
def follows_indefinite_article(self):
"""
Return True if this token comes immediately after an
indefinite article.
"""
if (self.previous is not None and
self.previous.lower() in INDEFINITE_ARTICLES):
return True
else:
return False
def is_wordlike(self):
"""
Return True if this looks roughly like a word.
This determines whether the token will be investigated
        further (looked up in the database, etc.)
"""
try:
return self._is_wordlike
except AttributeError:
if self.token.isalpha():
self._is_wordlike = True
elif (len(self.token) > 2 and
all([segment.isalpha() for segment in self.token.split('-')])):
self._is_wordlike = True
elif (len(self.token) > 2 and
all([segment.isalpha() for segment in
self.token.strip("'").split("'")])):
self._is_wordlike = True
elif self.token in WORDLIKE:
self._is_wordlike = True
else:
self._is_wordlike = False
return self._is_wordlike
def is_core(self):
if self.lower() in CORE_WORDS:
return True
else:
return False
def is_prefix(self):
return self.lower() in PREFIXES and self.next_token() == '-'
def starts_sentence(self):
if self.first:
return True
elif (self.previous and
self.previous.first and
not self.previous.is_wordlike()):
return True
else:
return False
def next_token(self):
try:
return self.next.token
except AttributeError:
return None
def previous_token(self):
try:
return self.previous.token
except AttributeError:
return None
def next_token_verbatim(self):
try:
return self.next.token_verbatim
except AttributeError:
return None
def previous_token_verbatim(self):
try:
return self.previous.token_verbatim
except AttributeError:
return None
def check_proper_name(self, method=None):
if self.proper_name is not None:
return
elif (not self.is_wordlike() or
not self.is_capitalized() or
self.is_prefix()):
self.proper_name = False
elif not method or method == 'capitalization':
if self._is_proper_bigram():
self.proper_name = True
self.next.proper_name = True
elif self.is_core():
self.proper_name = False
elif self.lower() in INFLECTIONS:
self.proper_name = False
elif self.lower() in CALENDAR:
self.proper_name = False
elif self.token == 'I':
self.proper_name = False
elif self.follows_indefinite_article():
self.proper_name = False
elif not self.lemma_manager():
self.proper_name = True
elif (method == 'neighbours' and
self.is_capitalized() and
not self.starts_sentence()):
if (self.previous and
self.previous.is_capitalized() and
not self.previous.starts_sentence() and
self.previous.proper_name is not False):
self.proper_name = True
self.previous.proper_name = True
elif (self.next and
self.next.is_capitalized() and
self.next.proper_name is not False):
self.proper_name = True
self.next.proper_name = True
elif method == 'unambiguous':
qset = ProperName.objects.filter(sort=self.lexical_sort(),
lemma=self.token_verbatim)
if qset.exists() and qset.first().common:
self.proper_name = True
elif not qset.exists():
self.proper_name = False
else:
self.propername_test = qset.first().lemma
elif method == 'midsentence' and not self.starts_sentence():
if self.lemma_manager() and self.lemma_manager().lemma.istitle():
self.proper_name = False
elif self.propername_test:
self.proper_name = True
elif method == 'firstword' and self.starts_sentence():
if self.lemma_manager():
self.proper_name = False
else:
self.proper_name = True
def repair_tokenization_errors(self):
"""
In case of overzealous tokenization, e.g where 'wanna' has been
split to 'wan' + 'na', we repair it and skip the next token
"""
if self.next_token() == 'na' and self.lower() in ('wan', 'gon'):
self.token += 'na'
self.token_verbatim += 'na'
self.omit_next()
# d'ye -> do 'ye
if self.token in ('d', 'D') and self.next_token() in ("'ye", "'you"):
self.token = 'do'
def omit_previous(self):
"""
Mark the previous token for disposal; and move the 'previous'
pointer back to the preceding token (in any).
"""
if self.previous:
self.previous.skip = True
def omit_next(self):
"""
Mark the next token for disposal; and move the 'next'
pointer forward to the following token (in any).
"""
if self.next:
self.next.skip = True
def _is_proper_bigram(self):
if (self.next_token() in PROPER_NAME_ENDS and
self.is_capitalized()):
return True
elif ((self.token in PROPER_NAME_TITLES or
self.token in PROPER_NAME_STARTS) and
self.next and
self.next.is_capitalized()):
return True
elif (self.token in PROPER_NAME_BIGRAMS and
self.next_token() in PROPER_NAME_BIGRAMS[self.token]):
return True
else:
return False
def lemma(self):
if self.lemma_manager() is None:
return None
else:
return self.lemma_manager().lemma
def lemma_manager(self):
try:
return self._lemma_manager
except AttributeError:
self.find_lemma()
return self._lemma_manager
def is_matched_to_lemma(self, lemma_record):
if self.lemma_manager() is None:
return False
elif int(self.lemma_manager().id) == int(lemma_record.id):
return True
else:
return False
def reset_lemma(self, new_lemma_manager):
self._lemma_manager = new_lemma_manager
def nix_lemma(self):
self._lemma_manager = None
def is_in_oed(self):
if self.lemma_manager() is None:
return False
else:
return True
def is_missing_from_oed(self):
if (self.is_wordlike() and
not self.proper_name and
not self.is_prefix() and
not self.is_in_oed()):
return True
else:
return False
def find_lemma(self):
if self.is_wordlike() and not self.is_prefix():
try:
Token.lemma_cache[self.lower()]
except KeyError:
qset = lemma_lookup(self.token,
self.lexical_sort(),
self.docyear,
sentence_start=self.starts_sentence())
Token.lemma_cache[self.lower()] = qset
candidates = Token.lemma_cache[self.lower()]
if len(candidates) == 1:
self.reset_lemma(candidates[0].lemma)
self.token = candidates[0].wordform
self.wordclass = candidates[0].wordclass
elif len(candidates) > 1:
winner = self.pick_candidate_by_pos(candidates)
self.reset_lemma(winner.lemma)
self.token = winner.wordform
self.wordclass = winner.wordclass
else:
self.reset_lemma(None)
else:
self.reset_lemma(None)
def pick_candidate_by_pos(self, candidates):
return light_pos_picker(self, candidates)
def space_before(self):
if (self.token in CLOSING_PUNCTUATION or
self.token.startswith("'") or
self.token == "n't" or
self.token == '-'):
return False
elif self.previous_token() in UNSPACED:
return False
else:
return True
def status(self):
if self.is_in_oed():
_status = 'oed'
elif self.proper_name:
_status = 'proper'
elif self.is_prefix():
_status = 'exception'
elif self.is_wordlike():
_status = 'missing'
else:
_status = 'punc'
return _status
def to_list(self):
if self.newline:
space_before = 2
elif self.space_before():
space_before = 1
else:
space_before = 0
return [json_safe(self.token_verbatim),
self.status(),
space_before, ]
#=============================================================
# The following functions are not strictly needed by the
# application itself, but may be useful for testing.
# Essentially, these are all just wrappers for calls to the
# token's linked Lemma object (if any).
#=============================================================
def _languages(self):
if self.lemma_manager() is None:
return None, None
else:
language_name = self.lemma_manager().language_name()
family = self.lemma_manager().language_family()
return language_name, family
def language(self):
if self._languages()[0]:
return self._languages()[0]
else:
return 'not specified'
def language_family(self):
if self._languages()[1]:
return self._languages()[1]
else:
return 'other'
def url(self):
if self.lemma_manager() is None:
return None
else:
return self.lemma_manager().url()
def oed_identifier(self):
if self.lemma_manager() is None:
return None
else:
return self.lemma_manager().oed_identifier()
def frequency(self):
if self.lemma_manager() is None:
return None
else:
if self.lemma_manager().f2000 > 1:
return int(self.lemma_manager().f2000)
else:
return self.lemma_manager().f2000
def log_band(self):
if self.lemma_manager() is None:
return None
else:
return self.lemma_manager().log_band()
def _token_cleanup(text):
"""
Clean up the form of the token as it's initially passed in
"""
# Unmask apostrophes (previously masked in tokenizer() to
# prevent erroneous splitting).
text = apostrophe_unmasker(text)
# Convert double-quotes to a single character
if text == "``":
text = '\u201c'
elif text == "''":
text = '\u201d'
return text
def _token_adjusted(text, year):
if year > 1950 and text.endswith("in'"):
return text.rstrip("'") + 'g'
else:
return text
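# Quick checks of the two module-level helpers above (assuming
# apostrophe_unmasker leaves these inputs unchanged):
#
#   _token_cleanup("``")              -> '\u201c'
#   _token_adjusted("singin'", 1960)  -> 'singing'
#   _token_adjusted("singin'", 1900)  -> "singin'"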
|
necrop/wordrobot
|
apps/tm/lib/token.py
|
token.py
|
py
| 14,303 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28585741124
|
import os
import math
BLOCK_SIZE = 16
UMAX = int(math.pow(256, BLOCK_SIZE))
def remove_line(s):
# returns the header line, and the rest of the file
return s[:s.index('\n') + 1], s[s.index('\n')+1:]
def parse_header_ppm(f):
data = f.read()
header = ""
for i in range(3):
header_i, data = remove_line(data)
header += header_i
return header, data
def to_bytes(n):
s = hex(n)
s_n = s[2:]
if 'L' in s_n:
s_n = s_n.replace('L', '')
if len(s_n) % 2 != 0:
s_n = '0' + s_n
decoded = s_n.decode('hex')
pad = (len(decoded) % BLOCK_SIZE)
if pad != 0:
decoded = "\0" * (BLOCK_SIZE - pad) + decoded
return decoded
def un_abc(ct):
blocks = [ct[i * BLOCK_SIZE:(i+1) * BLOCK_SIZE] for i in range(len(ct) / BLOCK_SIZE)]
for i in range(1, len(blocks)):
curr = int(blocks[len(blocks)-i].encode('hex'), 16)
prev = int(blocks[len(blocks)-i-1].encode('hex'), 16)
n_curr_blk = curr - prev
while n_curr_blk < 0:
curr += UMAX
n_curr_blk = curr - prev
blocks[len(blocks)-i] = to_bytes(n_curr_blk)
iv = blocks[0]
notabc = "".join(blocks[1:])
return iv, notabc
if __name__=="__main__":
with open('body.enc.ppm', 'rb') as f:
header, data = parse_header_ppm(f)
iv, o_img = un_abc(data)
with open('out.ppm', 'wb') as fw:
fw.write(header)
fw.write(o_img)
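    # Toy model of the un-chaining arithmetic above, with 1-byte blocks
    # (modulus 256 standing in for UMAX):
    chained = [7, 13, 250]  # c[i] = (c[i-1] + p[i]) % 256; c[0] is the IV
    plain = [(chained[i] - chained[i - 1]) % 256 for i in range(1, len(chained))]
    assert plain == [6, 237]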
|
VermillionBird/CTF-Writeups
|
2019/picoCTF/Cryptography/AES-ABC/deabc.py
|
deabc.py
|
py
| 1,299 |
python
|
en
|
code
| 8 |
github-code
|
6
|
10423084331
|
#-*- coding: utf-8 -*-
"""
Provides a class that tracks the state of a validation process across schema
members.
@author: Martí Congost
@contact: [email protected]
@organization: Whads/Accent SL
@since: June 2008
"""
from cocktail.modeling import DictWrapper
from cocktail.schema.accessors import get
undefined = object()
class ValidationContext(DictWrapper):
"""A validation context encapsulates the state of a validation process.
Normally, an instance of this class will be created internally by calling
the L{validate<member.Member.validate>} or
L{get_errors<member.Member.get_errors>} methods, and made available to
validation rules throughout the validation process.
The class works like a dictionary, and can hold arbitrary key,value pairs
    to influence the validation behavior or maintain validation state.
Also, the class allows compound members (L{schemas<schema.Schema>},
L{collections<schemacollection.Collection>} and others) to establish nested
contexts, through the use of the L{enter} and L{leave} method. This
mechanism also keeps track of the active validation L{path}.
"""
def __init__(self,
member,
value,
collection_index = None,
language = None,
parent_context = None,
**parameters):
DictWrapper.__init__(self)
self.__member = member
self.__value = value
self.__collection_index = collection_index
self.__language = language
self.__parent_context = parent_context
if parent_context is not None:
self._items.update(self.__parent_context._items)
self._items.update(member.validation_parameters)
self._items.update(parameters)
self.__trigger_validating_event(member)
def __trigger_validating_event(self, event_target):
if event_target.source_member:
self.__trigger_validating_event(event_target.source_member)
event_target.validating(context = self)
@property
def member(self):
return self.__member
@property
def value(self):
return self.__value
@property
def collection_index(self):
return self.__collection_index
@property
def language(self):
return self.__language
@property
def parent_context(self):
return self.__parent_context
def path(self, include_self = True):
if self.__parent_context:
for ancestor in self.__parent_context.path():
yield ancestor
if include_self:
yield self
def get_node_at_level(self, level):
if level < 0:
n = level
context = self
while n < -1:
context = context.__parent_context
if context is None:
raise IndexError(
"Invalid validation context level: %r" %
level
)
n += 1
return context
else:
            return list(self.path())[level]
def get_value(self, key, default = None, language = None, stack_node = -1):
context = self.get_node_at_level(stack_node)
value = context.get_object()
for part in key.split("."):
value = get(value, part, undefined, language)
if value is undefined:
return default
return value
def get_object(self, stack_node = -1):
from cocktail.schema.schemaobject import SchemaObject
context = self.get_node_at_level(stack_node)
while context is not None:
if isinstance(context.value, (SchemaObject, dict)):
return context.value
context = context.__parent_context
def __bool__(self):
return True
def __setitem__(self, key, value):
self._items[key] = value
def setdefault(self, key, default):
self._items.setdefault(key, default)
def update(self, items, **kwargs):
        self._items.update(items, **kwargs)
|
marticongost/cocktail
|
cocktail/schema/validationcontext.py
|
validationcontext.py
|
py
| 4,068 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73674401786
|
def registro(bd):
    nombre = input("Enter the username: ")
    contrasenia = input("Enter the password: ")
bd[nombre] = contrasenia
def leerData(bd):
print("La informacion almacenada en la base de datos es: ")
for usu, contra in bd.items():
print(f"{usu}: {contra}")
def guardarArchivo(bd):
f = open("datos.txt", "wt")
f.write("{\n")
for usu, contra in bd.items():
f.write("\t")
f.write(f'"{usu}"')
f.write(": ")
f.write(f'"{contra}"')
f.write(",")
f.write("\n")
f.write("}")
def login(bd):
    usuario = input("Enter your username: ")
    try:
        contra = bd[usuario]
        contrasenia = input("Enter your password: ")
        if(contra == contrasenia):
            print("Logged in successfully")
        else:
            print("Incorrect password")
    except KeyError:
        print("User not found")
BD = {}
registro(BD)
leerData(BD)
guardarArchivo(BD)
login(BD)
|
DanielFranco92/pre-entrega
|
main.py
|
main.py
|
py
| 1,036 |
python
|
es
|
code
| 0 |
github-code
|
6
|
73027941309
|
from google.cloud import storage
import os
input_folder = "../Crop_Reports/Bengal Gazettes Chunks/"
bucket_name = "calcutta-gazette"
def explicit(bucket_name, source_name, path):
# Explicitly use service account credentials by specifying the private key
# file.
storage_client = storage.Client.from_service_account_json('../API_Keys/Famine Research OCR-cdf9018b001d.json')
destination_name = source_name
source_name2 = os.path.join(path, source_name)
# Make an authenticated API request
# buckets = list(storage_client.list_buckets())
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(destination_name)
if not blob.exists():
blob.upload_from_filename(source_name2)
if __name__ == '__main__':
folder_list = os.listdir(input_folder)
for folder in folder_list:
path = os.path.join(input_folder, folder)
file_list = os.listdir(path)
for file in file_list:
print(file)
explicit(bucket_name, file, path)
|
jgoman99/British-Bengal-Weekly-Crop-Reports
|
Python Code/splits_to_cloud.py
|
splits_to_cloud.py
|
py
| 1,067 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38046769762
|
#word guessing game in python
import random
def choose_random_word():
words = ['rainbow', 'computer', 'science', 'programming',
'python', 'mathematics', 'player', 'condition',
'reverse', 'water', 'board', 'geeks']
return random.choice(words)
def display_word(word, guesses):
for char in word:
if char in guesses:
print(char, end=" ")
else:
print("_", end=" ")
print()
def play_game():
name = input("What is your name? ")
print("Good Luck, ", name)
word = choose_random_word()
guesses = ''
turns = 12
while turns > 0:
display_word(word, guesses)
if all(char in guesses for char in word):
print("You Win")
print("The word is:", word)
break
guess = input("Guess a character: ")
if len(guess) != 1 or not guess.isalpha():
print("Invalid input. Please enter a single alphabet character.")
continue
guesses += guess
if guess not in word:
turns -= 1
print("Wrong")
print("You have", turns, "more guesses")
if turns == 0:
print("You Lose")
print("The word was:", word)
if __name__ == "__main__":
play_game()
|
akshaybannatti/Word-Guessing-Game-Python
|
#word guessing game in python.py
|
#word guessing game in python.py
|
py
| 1,372 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16586269759
|
from flask import Blueprint, render_template
from app.models import Post
home = Blueprint('home', __name__)
@home.route('/')
def index():
posts = Post.query.filter_by(published=True).all()
return render_template('home/index.html', posts=posts)
|
rg3915/flask-masterclass
|
app/blueprints/home_blueprint.py
|
home_blueprint.py
|
py
| 256 |
python
|
en
|
code
| 1 |
github-code
|
6
|
11692218331
|
#1
celegans_phenotypes = ['Emb', 'Him', 'Unc', 'Lon', 'Dpy', 'Sma']
for phenotype in celegans_phenotypes:
print(phenotype)
#2
half_lives = [87.74, 24110.0, 6537.0, 14.4, 376000.0]
for value in half_lives:
    print(value, end=' ')
#3
more_whales = [5, 4, 7, 3, 2, 3, 2, 6, 4, 2, 1, 7, 1, 3]
new_whales = []
for count in more_whales:
    new_whales.append(count + 1)
print(new_whales)
#4
alkaline_earth_metals = [[4, 9.012], [112, 24.305], [20, 40.078], [38, 87.62], [56, 137.327], [88, 226]]
for inner_list in alkaline_earth_metals:
print(inner_list[0])
print(inner_list[1])
number_and_weight = []
for inner_list in alkaline_earth_metals:
number_and_weight.append(inner_list[0])
number_and_weight.append(inner_list[1])
#5
def mystery_function(values):
    """(list) -> list
    Return a copy of the list, values, and the sublists it contains.
    The top-level sublists have their elements reversed in the returned list.
    >>> mystery_function([[1, 2, 3], [4, 5, 6]])
    [[3, 2, 1], [6, 5, 4]]
    """
    return [sublist[::-1] for sublist in values]
#6
text = "quit"
text = input("Please enter a chemical formula(or 'quit' to exit): ")
if text == "quit":
print("...exiting program")
elif text == "H2O":
print("Water")
elif text == "NH3":
print("Ammonia")
elif text == "CH4":
print("Methane")
else:
print("Unkown Compount")
#7
country_populations = [1295, 23, 7, 3, 47, 21]
total = 0
for population in country_populations:
total += population
print(total)
#8
print('rat_1')
print('rat_2')
print('1 weighed more then 2')
print('1 weighed less than 2')
#9
for number in range(33, 50):
print(number)
#10
for number in range(10):
print(10 - number, end='')
#11
sum = 0
count = 0
for number in range(2, 23):
sum += number
count += 1
average = sum / count
print(average)
#12
def remove_neg(num_list):
"""(list of number) -> NoneType
Remove the negative numbers from the list num_list.count
numbers = [-5, 1, -3, 2]]
remove_neg(numbers)
numbers
[1, 2]
"""
for item in num_list:
if item < 0:
num_list.remove(item)
return(num_list)
#13
for width in range(1, 8):
print('T' * width)
#14
for width in range(1, 8):
print('T' * width)
#15
width = 1
while width < 8:
print('T' * width)
width += 1
#16
week = 1
print(week)
week = 0
print(week)
|
LDavis21/Assignments.github.io
|
assignment4/PPch9.py
|
PPch9.py
|
py
| 2,541 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22400150737
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QModelIndex,QItemSelectionModel
from diz import *
import sys
from BD import Orm
from dialog import Dialog
from dizain1_2 import TwoWindow
from dialog2 import Dialog2
bd = Orm()
class InputDialog(QtWidgets.QDialog):
def __init__(self, root, **kwargs):
super().__init__(root, **kwargs)
self.win = root
label = QtWidgets.QLabel('ะะฒะตะดะธัะต ะฝะฐะทะฒะฐะฝะธะต')
self.edit = QtWidgets.QLineEdit()
button = QtWidgets.QPushButton('ะะฐะนัะธ')
button.clicked.connect(self.push)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(label)
layout.addWidget(self.edit)
layout.addWidget(button)
self.setLayout(layout)
def push(self):
if self.edit.text():
r = bd.search_mater(self.edit.text())
if r:
self.win.now(r)
self.close()
self.win.hid()
else:
msg = QMessageBox()
msg.setWindowTitle("ะัะธะฑะบะฐ")
msg.setText("ะะต ะฝะฐะนะดะตะฝะพ ")
msg.addButton('ะะบ', QMessageBox.RejectRole)
msg.exec()
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
        # column headers for the table.
self.ui.tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
self.ui.pushButton.clicked.connect(self.addfac)
self.ui.pushButton_2.clicked.connect(self.addmat)
self.ui.pushButton_4.clicked.connect(self.search)
self.ui.pushButton_5.hide()
self.ui.pushButton_5.clicked.connect(self.tomain)
self.now(bd.allmat())
self.ui.pushButton_3.clicked.connect(self.delmat)
self.id=False
def now(self, data):
if data:
self.ui.tableWidget.setEnabled(True)
self.ui.pushButton_3.setEnabled(True)
self.ui.pushButton_4.setEnabled(True)
            # rows and columns
self.ui.tableWidget.setRowCount(
len(data)
)
self.ui.tableWidget.setColumnCount(
len(data[0])
)
self.ui.tableWidget.setHorizontalHeaderLabels(
('Id', 'ะะฐะทะฒะฐะฝะธะต ะผะฐัะตัะธะฐะปะฐ', 'ะคะธัะผะฐ', 'ะะฐะณะฐะทะธะฝ', 'ะะพััะฐะฒัะธะบ',
'ะะฐะปะธัะธะต ััะตัะฐ', 'ะะฐะปะธัะธะต ะะะก', 'ะะพะปะธัะตััะฒะพ', 'ะฆะตะฝะฐ')
)
row = 0
for tup in data:
col = 0
for item in tup:
cellinfo = QTableWidgetItem(str(item))
cellinfo.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
)
self.ui.tableWidget.setItem(row, col, cellinfo)
# self.ui.tableWidget.horizontalHeader().setSectionResizeMode(col , QHeaderView.Stretch)
col += 1
row += 1
self.ui.tableWidget.resizeColumnsToContents()
self.ui.tableWidget.horizontalHeader().setSectionResizeMode(col - 1, QHeaderView.Stretch)
else:
self.ui.tableWidget.clear()
self.ui.tableWidget.setEnabled(False)
self.ui.pushButton_3.setEnabled(False)
self.ui.pushButton_4.setEnabled(False)
def addmat(self):
self.dualog = Dialog()
self.dualog.exec()
self.now(bd.allmat())
def addfac(self):
if not self.id:
self.now(bd.allmat())
msg = QMessageBox()
msg.setWindowTitle("ะัะธะฑะบะฐ")
msg.setText("ะั ะฝะต ะฒัะฑัะฐะปะธ ะฝะต ะพะดะธะฝ ะดะพะณะพะฒะพั")
msg.addButton('ะะบ', QMessageBox.RejectRole)
msg.exec()
else:
print(self.id)
self.now(bd.allmat())
self.dualog2 = Dialog2(self.id)
self.dualog2.exec()
self.now(bd.allmat())
def delmat(self):
if not self.id:
self.now(bd.allmat())
msg = QMessageBox()
msg.setWindowTitle("ะัะธะฑะบะฐ")
msg.setText("ะั ะฝะต ะฒัะฑัะฐะปะธ ะฝะต ะพะดะธะฝ ะดะพะณะพะฒะพั")
msg.addButton('ะะบ', QMessageBox.RejectRole)
msg.exec()
else:
print(self.id)
bd.delmat(self.id)
self.now(bd.allmat())
@pyqtSlot(QModelIndex)
    def on_tableWidget_clicked(self, index: QModelIndex):  # get the row index on click
self.id = int(self.ui.tableWidget.item(index.row(), 0).text())
print(self.id)
@pyqtSlot(QModelIndex)
    def on_tableWidget_doubleClicked(self, index: QModelIndex):  # get the object's record list
r = int(self.ui.tableWidget.item(index.row(), 0).text())
data = bd.allfac(r)
if not data:
msg = QMessageBox()
msg.setWindowTitle("ะัะธะฑะบะฐ")
msg.setText("ะะตั ะทะฐะฟะธัะตะน ะพะฑัะตะบัะฐ")
msg.addButton('ะะบ', QMessageBox.RejectRole)
msg.exec()
else:
self.twow = TwoWindow(r)
self.twow.show()
self.twow.now(data)
    def search(self):
        # Keep the dialog in a separate attribute: assigning to self.search
        # would shadow this method and break the button on the second click.
        self.search_dialog = InputDialog(self)
        self.search_dialog.exec()
def hid(self):
self.ui.pushButton_5.show()
self.ui.pushButton_4.hide()
def tomain(self):
self.now(bd.allmat())
self.ui.pushButton_5.hide()
self.ui.pushButton_4.show()
app = QtWidgets.QApplication([])
win = MainWindow()
# win.now(data)
win.show()
sys.exit(app.exec())
|
Vorlogg/BD
|
dizain.py
|
dizain.py
|
py
| 5,829 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
29579809040
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 7 11:04:13 2018
@author: Akitaka
"""
# 1: Import libraries --------------------------------
import numpy as np   # numpy, for matrices and numerical arrays
import pandas as pd   # pandas, for data analysis
import matplotlib.pyplot as plt   # plotting library
from sklearn import linear_model, metrics, preprocessing, model_selection   # machine-learning library (cross_validation was renamed model_selection in scikit-learn 0.20)
from mlxtend.plotting import plot_decision_regions   # external library for plotting learned decision regions
from sklearn.kernel_approximation import RBFSampler   # function for kernel approximation
from matplotlib.colors import ListedColormap   # for plotting
# 2: Create XOR data, i.e. (x=pos, y=pos)=0, (x=pos, y=neg)=1, and so on --------------
np.random.seed(0)
X_xor=np.random.randn(200,2)
y_xor=np.logical_xor(X_xor[:,0]>0, X_xor[:,1]>0)
y_xor=np.where(y_xor,1,0)
pd.DataFrame(y_xor) # run this line to inspect the data
# 3: Plot the data ------------------------------------------------------
#%matplotlib inline
plt.scatter(X_xor[y_xor==1, 0], X_xor[y_xor==1, 1], c='b', marker='x', label='1')
plt.scatter(X_xor[y_xor==0, 0], X_xor[y_xor==0, 1], c='r', marker='s', label='0')
plt.legend(loc='best')
plt.show()
# 4: Prepare the data -------------------------------------------------------
X_std=X_xor
z=y_xor
# 5: Apply the kernel approximation ------------------------------------------
rbf_feature = RBFSampler(gamma=1, n_components=100, random_state=1)
X_std = rbf_feature.fit_transform(X_std)
print("shape of X_std: ", pd.DataFrame(X_std).shape)
#pd.DataFrame(X_std).to_clipboard() # too large to display here; copy to the clipboard and paste into Excel
# 6: Classify with machine learning ---------------------------------------------------
clf_result=linear_model.SGDClassifier(loss="hinge") #loss="hinge", loss="log"
# 7: Evaluate performance with K-fold cross-validation ---------------------
scores=model_selection.cross_val_score(clf_result, X_std, z, cv=10)
print("mean accuracy = ", scores.mean())
print("standard deviation of accuracy = ", scores.std())
# 8: Split into training and test data and run again ------------------
X_train, X_test, train_label, test_label=model_selection.train_test_split(X_std,z, test_size=0.1, random_state=1)
clf_result.fit(X_train, train_label)
# compute the accuracy
pre=clf_result.predict(X_test)
ac_score=metrics.accuracy_score(test_label,pre)
print("accuracy = ",ac_score)
# 9: Plot the decision regions
x1_min, x1_max, x2_min, x2_max=-3, 3, -3, 3
resolution=0.02
xx1, xx2=np.meshgrid(np.arange(x1_min, x1_max, resolution),np.arange(x2_min, x2_max, resolution))
X=(np.array([xx1.ravel(), xx2.ravel()]).T)
plot_z=clf_result.predict(rbf_feature.fit_transform(X))
colors=('red','blue')
cmap=ListedColormap(colors[:len(np.unique(plot_z))])
plot_z=plot_z.reshape(xx1.shape)
plt.contourf(xx1,xx2, plot_z, alpha=0.4, cmap=cmap)
|
nakanishi-akitaka/python2018_backup
|
1207/ml2b.py
|
ml2b.py
|
py
| 3,157 |
python
|
ja
|
code
| 5 |
github-code
|
6
|
21424331672
|
'''
1. Single-threaded parser.
2. Time measurement
3. Multiprocessing Pool
4. Time measurement
5. Export to csv
'''
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from multiprocessing import Pool
import csv
import time
def get_html(url):
r = requests.get(url) # Response
    return r.text # returns the HTML code of the page (url)
def get_all_links(html):
counter = 0
soup = BeautifulSoup(html, 'lxml')
tags_div = soup.find('div').find_all('div', class_="cmc-table__column-name sc-1kxikfi-0 eTVhdN")
links = []
for td in tags_div:
a = td.find('a').get('href') #string
link = "https://coinmarketcap.com" + a
links.append(link)
return links
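# Note: each href is assumed to be a site-relative path such as
# "/currencies/bitcoin/", so get_all_links yields absolute URLs like
# "https://coinmarketcap.com/currencies/bitcoin/".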
def get_page_data(html):
soup = BeautifulSoup(html, 'lxml')
try:
name = soup.find("h1").text.strip()
except:
name = ""
try:
price = soup.find("span", class_="cmc-details-panel-price__price").text.strip()
except:
price = ""
data = {'name': name, 'price': price}
return data
def write_csv(data):
with open('coinmarketcap.csv', 'a') as f:
writer = csv.writer(f)
writer.writerow((data['name'],
data['price']))
print(data['name'], 'parsed')
def make_all(url):
html = get_html(url)
data = get_page_data(html)
write_csv(data)
# time.sleep(5)
def main():
start = time.time()
url = "https://coinmarketcap.com/all/views/all/"
all_links = get_all_links(get_html(url))
with Pool(40) as p:
p.map(make_all, all_links)
end = time.time()
total = end - start
print(str(total))
if __name__ == "__main__":
main()
|
DexterAkaGrich/potential-couscous
|
first_meet.py
|
first_meet.py
|
py
| 1,849 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31858846976
|
#!/bin/python3
import os
def isBalanced(brackets):
stack = []
for bracket in brackets:
if bracket in ['{', '[', '(']:
stack.append(bracket)
else:
if len(stack) == 0:
return 'NO'
last = stack.pop()
isCurly = bracket == '}' and last != '{'
isParenthesis = bracket == ')' and last != '('
isSquare = bracket == ']' and last != '['
if isCurly or isParenthesis or isSquare:
return 'NO'
if len(stack) != 0:
return 'NO'
return 'YES'
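# Illustrative examples (hypothetical inputs, not part of the judge's data):
#   isBalanced('{[()]}') -> 'YES'
#   isBalanced('{[(])}') -> 'NO'   (']' arrives while '(' is still open)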
def main():
with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
t = int(input())
for t_itr in range(t):
s = input()
result = isBalanced(s)
fptr.write(result + '\n')
if __name__ == '__main__':
main()
|
caioportela/code-challenges
|
hackerrank/problem-solving/balanced-brackets.py
|
balanced-brackets.py
|
py
| 860 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20656199478
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Create a Milo input file from a frequency calculation.
It must be a Gaussian 09 or 16 high-precision frequency calculation. You
request this with '# freq=(hpmodes) ... '.
"""
import argparse
import sys
from milo_1_0_3 import atom
from milo_1_0_3 import containers
from milo_1_0_3 import enumerations as enums
from milo_1_0_3 import exceptions
from milo_1_0_3 import program_state as ps
def main():
"""Parse frequency file and print to new Milo input."""
parser = argparse.ArgumentParser(description="Make a Milo input file "
"from a high-precision Gaussian frequency"
" calculation.\n")
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
default=sys.stdin, help="Frequency calculation file. "
"<stdin> by default.")
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
default=sys.stdout, help="New Milo input file. "
"<stdout> by default.")
parser.add_argument('-v', '--verbose', action='count', default=0,
help="Print other parameters in $job section. "
"-v for common parameters, -vv for all parameters")
args = parser.parse_args()
program_state = ps.ProgramState()
try:
parse_gaussian_header(args.infile, program_state)
parse_gaussian_charge_spin(args.infile, program_state)
parse_gaussian_molecule_data(args.infile, program_state)
parse_gaussian_frequency_data(args.infile, program_state)
parse_gaussian_isotope_data(args.infile, program_state)
print_job_section(args.outfile, program_state, args.verbose)
print_output_comment(args.infile, args.outfile)
print_molecule_section(args.outfile, program_state)
print_frequency_data_section(args.outfile, program_state)
except Exception as e:
print("Oh no! It looks like there was an error!")
print("Error message:", e)
print("\nPython error details:")
raise
def parse_gaussian_header(input_iterable, program_state):
"""
Parse gaussian_header from frequency file.
Looking for:
******************************************
------------------------------
# opt freq=hpmodes m062x/3-21g
------------------------------
Result:
gaussian_header = 'm062x/3-21g'
"""
past_warning = False
lines = list()
for line in input_iterable:
if "*****" in line:
past_warning = True
if past_warning and "-----" in line:
for next_line in input_iterable:
if "-----" in next_line:
break
lines.append(next_line[1:].strip("\n"))
clean_line = "".join(lines).strip()
if "hpmodes" not in clean_line.casefold():
raise exceptions.InputError("Must be high-precision frequency "
"calculation. Use 'freq=hpmodes'.")
tokens = clean_line.split()
tokens = [x for x in tokens if "#" not in x
and "opt" not in x.casefold()
and "freq" not in x.casefold()]
program_state.gaussian_header = " ".join(tokens)
return
raise exceptions.InputError("Error parsing gaussian_header.")
def parse_gaussian_charge_spin(input_iterable, program_state):
"""
Parse charge and spin multiplicity from frequency file.
Looking for:
---------------------------------------------
Symbolic Z-matrix:
Charge = 0 Multiplicity = 1
O -0.19334 -0.19871 0.
"""
for line in input_iterable:
if "Charge =" in line:
program_state.charge = int(line.split()[2])
program_state.spin = int(line.split()[5])
return
raise exceptions.InputError("Error parsing charge and spin multiplicity.")
def parse_gaussian_molecule_data(input_iterable, program_state):
"""
Parse molecule data from frequency file.
Will pull the last "Standard orientation:" in the log file, or the last
"Input orientation:" if there is no "Standard orientation:" (for example,
if the nosymm keyword is used).
Looking for:
Standard orientation:
---------------------------------------------------------------------
Center Atomic Atomic Coordinates (Angstroms)
Number Number Type X Y Z
---------------------------------------------------------------------
"""
for line in input_iterable:
if "Harmonic frequencies (cm**-1)" in line:
return
if "Input orientation:" in line or "Standard orientation:" in line:
positions = containers.Positions()
for coordinate_line in input_iterable:
if ("Rotational constants" in coordinate_line or
"Distance matrix" in coordinate_line):
break
coordinates = coordinate_line.split()
if coordinates[0].isnumeric():
x = float(coordinates[3])
y = float(coordinates[4])
z = float(coordinates[5])
positions.append(x, y, z, enums.DistanceUnits.ANGSTROM)
program_state.input_structure = positions
raise exceptions.InputError("Error parsing molecule data.")
def parse_gaussian_frequency_data(input_iterable, program_state):
"""
Parse frequency data from frequency file.
Will pull the first time they are listed (with high-precision).
Looking for:
Frequencies --- 1682.1354 3524.4296 3668.7401
Reduced masses --- 1.0895 1.0389 1.0827
Force constants --- 1.8163 7.6032 8.5864
IR Intensities --- 52.8486 4.2243 0.3831
Coord Atom Element:
1 1 8 -0.00000 0.00000 -0.00000
2 1 8 0.00000 -0.00000 -0.07070
3 1 8 -0.07382 0.04553 -0.00000
1 2 1 0.00000 0.00000 0.00000
2 2 1 0.39258 0.60700 0.56106
3 2 1 0.58580 -0.36126 -0.42745
1 3 1 0.00000 -0.00000 0.00000
2 3 1 -0.39258 -0.60700 0.56106
3 3 1 0.58580 -0.36126 0.42745
Harmonic frequencies (cm**-1), IR intensities (KM/Mole), Raman scatt
activities (A**4/AMU), depolarization ratios for plane and unpolariz
"""
has_started = False
for line in input_iterable:
if "Frequencies ---" in line:
has_started = True
for frequency in line.split()[2:]:
program_state.frequencies.append(float(frequency),
enums.FrequencyUnits
.RECIP_CM)
elif "Reduced masses ---" in line:
for reduced_mass in line.split()[3:]:
program_state.reduced_masses\
.append(float(reduced_mass), enums.MassUnits.AMU)
elif "Force constants ---" in line:
for force_constant in line.split()[3:]:
program_state.force_constants\
.append(float(force_constant), enums.ForceConstantUnits
.MILLIDYNE_PER_ANGSTROM)
elif "Coord Atom Element:" in line:
data_in_columns = list()
for coordinate_line in input_iterable:
if ("Harmonic frequencies (cm**-1)" in coordinate_line
or " " in coordinate_line):
break
data_in_columns.append(coordinate_line.split()[3:])
data_in_rows = list(zip(*data_in_columns))
for frequency in data_in_rows:
program_state.mode_displacements.append(containers.Positions())
for x, y, z in zip(*[iter(frequency)] * 3):
program_state.mode_displacements[-1].append(float(x),
float(y), float(z), enums.DistanceUnits.ANGSTROM)
elif has_started and "activities (A**4/AMU)" in line:
return
raise exceptions.InputError("Error parsing frequency data.")
def parse_gaussian_isotope_data(input_iterable, program_state):
"""
Parse isotope and atomic number data from frequency file.
Looking for:
-------------------
- Thermochemistry -
-------------------
Temperature 298.150 Kelvin. Pressure 1.00000 Atm.
Atom 1 has atomic number 8 and mass 15.99491
Atom 2 has atomic number 1 and mass 1.00783
Atom 3 has atomic number 1 and mass 1.00783
Molecular mass: 18.01056 amu.
"""
for line in input_iterable:
if "Thermochemistry" in line:
atoms = list()
for mass_line in input_iterable:
if "Molecular mass" in mass_line:
break
split_line = mass_line.split()
if split_line[0] == "Atom":
atomic_number = int(split_line[5])
atoms.append(atom.Atom.from_atomic_number(atomic_number))
atoms[-1].change_mass(split_line[8])
program_state.atoms = atoms
return
raise exceptions.InputError("Error parsing isotope data.")
def print_section(output_iterable, section_name, inside):
"""Print a section to output_iterable."""
stdout = sys.stdout
sys.stdout = output_iterable
print(f"${section_name}")
print(inside)
print("$end")
print()
sys.stdout = stdout
def print_job_section(output_iterable, program_state, verbose):
"""
Print the $job section with gaussian_header from program_state.
verbose controls how other job parameters are printed.
"""
section = list()
section.append(" gaussian_header "
f"{program_state.gaussian_header}")
if verbose >= 1:
section.append(" # step_size 1.00 # in femtoseconds")
section.append(" # max_steps 100 # or no_limit")
section.append(" # temperature 298.15 # in kelvin")
section.append(" # phase bring_together n m"
" # or push_apart n m")
section.append(" # memory 24 # in GB")
section.append(" # processors 24")
section.append(" # random_seed generate # or an "
"integer")
if verbose >= 2:
section.append(" # oscillator_type quasiclassical")
section.append(" # geometry_displacement off")
section.append(" # rotational_energy off")
section.append(" # energy_boost off")
section.append(" # integration_algorithm verlet")
section.append(" # program gaussian16")
section.append(" # fixed_mode_direction n 1 # or n -1")
print_section(output_iterable, "job", "\n".join(section))
def print_molecule_section(output_iterable, program_state):
"""Print $molecule section with data from program_state."""
section = list()
section.append(f" {program_state.charge} {program_state.spin}")
for _atom, (x, y, z) in zip(program_state.atoms,
program_state.input_structure.as_angstrom()):
section.append(f" {_atom.symbol} {x:12.6f} {y:12.6f} {z:12.6f}")
print_section(output_iterable, "molecule", "\n".join(section))
section = list()
for i, _atom in enumerate(program_state.atoms, 1):
section.append(f" {i:< 3d} {_atom.mass:10.5f}")
print_section(output_iterable, "isotope", "\n".join(section))
def print_frequency_data_section(output_iterable, program_state):
"""Print $frequencies section with data from program_state."""
section = list()
for frequency, reduced_mass, force_constant, mode_displacement in zip(
program_state.frequencies.as_recip_cm(),
program_state.reduced_masses.as_amu(),
program_state.force_constants.as_millidyne_per_angstrom(),
program_state.mode_displacements):
section.append(f" {frequency:10.4f} {reduced_mass:7.4f} "
f"{force_constant:7.4f}")
for x, y, z in mode_displacement.as_angstrom():
section.append(f" {x:8.5f} {y:8.5f} {z:8.5f}")
section.append("\n")
section.pop()
print_section(output_iterable, "frequency_data", "".join(section))
def print_output_comment(input_iterable, output_iterable):
"""Print comment with frequency file name and date of parsing."""
from datetime import datetime
import os
comment = list()
comment.append(" Frequency and molecule data parsed ")
if input_iterable != sys.stdin:
comment.append("from ")
comment.append(os.path.basename(input_iterable.name))
comment.append(" ")
else:
try:
name = os.readlink('/proc/self/fd/0').split('/')[-1].split('.')[0]
comment.append("from ")
comment.append(name)
comment.append(" ")
except FileNotFoundError:
comment.append("from <stdin> ")
comment.append(datetime.now().strftime("on %d-%b-%Y at %X"))
print_section(output_iterable, "comment", "".join(comment))
if __name__ == "__main__":
main()
|
DanielEss-lab/milo
|
milo_1_0_3/tools/parse_frequencies.py
|
parse_frequencies.py
|
py
| 13,885 |
python
|
en
|
code
| 3 |
github-code
|
6
|
2534129469
|
# -*- coding: utf-8 -*-
"""
Geometric transformations on 3D point cloud.
Created on Wed Apr 10 11:00:00 2019
Author: Prasun Roy | CVPRU-ISICAL (http://www.isical.ac.in/~cvpr)
GitHub: https://github.com/prasunroy/sign-language
"""
import copy
import math
import numpy as np
class Transforms3D(object):
@staticmethod
def rotate(pointcloud):
assert pointcloud.__class__.__name__ == 'PointCloud3D' \
            and pointcloud._data is not None
pc = copy.deepcopy(pointcloud)
for frame, joints in enumerate(pc._data):
# Reshape joints into Nx3 matrix.
joints = joints.reshape(-1, 3)
# Get left shoulder L, right shoulder R and spine center C.
L = joints[pointcloud.JointType_ShoulderLeft, :]
R = joints[pointcloud.JointType_ShoulderRight, :]
C = joints[pointcloud.JointType_SpineMid, :]
# Calculate unit vector n along the normal to LRC-plane.
CL = L - C
CR = R - C
n = np.cross(CL, CR) / (np.linalg.norm(CL) * np.linalg.norm(CR))
# Calculate angle between the projection
# of n on XZ-plane and Z-axis.
n = n * np.array([1, 0, 1])
k = np.array([0, 0, -1])
ratio = np.dot(n, k) / (np.linalg.norm(n) * np.linalg.norm(k))
theta = np.arccos(ratio)
theta = math.copysign(theta, n[0])
# Construct transformation matrix R for rotation around Y-axis.
R = np.array([
[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]
])
# Perform rotation and update data.
joints = np.dot(R, joints.T).T
pc._data[frame] = joints.reshape(-1)
return pc
@staticmethod
def translate(pointcloud):
assert pointcloud.__class__.__name__ == 'PointCloud3D' \
            and pointcloud._data is not None
pc = copy.deepcopy(pointcloud)
for frame, joints in enumerate(pc._data):
# Reshape joints into Nx3 matrix.
joints = joints.reshape(-1, 3)
# Construct transformation matrix T for translation.
C = joints[pointcloud.JointType_SpineMid, :]
T = -C
# Perform translation and update data.
joints += T
pc._data[frame] = joints.reshape(-1)
return pc
@staticmethod
def transform(pointcloud):
assert pointcloud.__class__.__name__ == 'PointCloud3D' \
            and pointcloud._data is not None
return Transforms3D.translate(Transforms3D.rotate(pointcloud))
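# Usage sketch (hypothetical; assumes `pc` is a PointCloud3D whose _data holds
# one flattened joint array per frame): rotate each frame so the shoulder
# plane faces the camera, then recenter it on the spine joint:
#   normalized = Transforms3D.transform(pc)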
|
prasunroy/sign-language
|
transforms.py
|
transforms.py
|
py
| 2,810 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30085050335
|
from django.shortcuts import render
from django.http import JsonResponse
from category.models import Category
# Create your views here.
def jsons(data = None, errorCode = 0, cookies = ''):
if data is None:
data = []
return JsonResponse({'errorCode': errorCode, 'data': data, 'cookies': cookies})
def categoryGetAll(request):
categories = Category.objects.all()
return jsons([dict(category.body()) for category in categories])
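# Illustrative response shape (assuming Category.body() returns a dict-like
# object): {"errorCode": 0, "data": [{...}, ...], "cookies": ""}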
|
jeremyytann/BUAA-SE-LetStudy
|
Code/backend/category/views.py
|
views.py
|
py
| 456 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40333302045
|
#import pyperclip
import csv
import write_cv_main_functions as wc
#### grants and awards CV data
def compile_teaching(teaching_file, table_spacing,lwidth,rwidth):
teaching_txt=wc.header_setup('Teaching Experience', table_spacing,lwidth,rwidth, False)
teaching_dict=wc.convert_csv_to_dict(teaching_file,'Sorting Date')
institutions_set=set()
institutions_count_dict={}
institutions_index_count_dict={}
institution_txt_dict={}
ordering_institutions_dict={}
# build the set of distint institutions and keep track of how many
# times taught at each institution (so that your code knows when to
# close the table environment)
for i in range(len(teaching_dict)):
row=teaching_dict[i]
role=row['Role']
course=row['Course']
institution=row['Institution']
if institution in institutions_set:
institutions_count_dict[institution]+=1
else:
institutions_set.add(institution)
institutions_count_dict[institution]=1
institutions_index_count_dict[institution]=0
# subheaders for each institution added to the dictionary of strings
# the dictionary of strings is called institution_txt_dict
# key = institutions (string) value = text to add to the document
# (string)
for institution in institutions_set:
## start the table for each institution
institution_txt_dict[institution]= wc.subheader_setup(institution, table_spacing, lwidth, rwidth)
## prepare the ordering_institutions_dict for later comparison
ordering_institutions_dict[institution]=0
# add rows to the appropriate table in the .tex file for each entry of
# the csv. There is a table for each institution
for i in range(len(teaching_dict)):
row=teaching_dict[i]
role=row['Role']
course_no=row['Course Number']
course=row['Course']
if course_no!='':
course='{\\normalfont %s:} %s' % (course_no, course)
institution=row['Institution']
start_sem=row['Start Semester'][:2]
start_year=row['Start Year'][2:]
end_sem=row['End Semester'][:2]
end_year=row['End Year'][2:]
note=row['Note']
description=row['Description']
description=''
date=wc.format_date('', '', start_year, start_sem, '', '', end_year, end_sem, active=False)
## trying to identify the most recent date
if int(start_year)>int(ordering_institutions_dict[institution]):
ordering_institutions_dict[institution]=start_year
for given_institution in institutions_set:
if institution==given_institution:
institutions_index_count_dict[given_institution]+=1
## so you know when to close the table; boolean value that
                ## compares lengths of dicts
is_end=institutions_count_dict[given_institution]==institutions_index_count_dict[given_institution]
## add text to the appropriate institution
institution_txt_dict[given_institution]+=wc.create_table_entry(course,date,is_end,role, '', note, description,False)
# add everybody to the main string
# for key in institutions_set:
# teaching_txt+=institution_txt_dict[key]
#sort in reverse chronological order
sorted_dict=dict(sorted(ordering_institutions_dict.items(),key= lambda x:x[1],reverse=True))
#choose this option is you want to sort in chronological order
# sorted_dict=dict(sorted(ordering_institutions_dict.items(),key= lambda x:x[1]))
for entry in sorted_dict:
teaching_txt+=institution_txt_dict[entry]
return teaching_txt
|
hdbray/cv_builder
|
write_cv_teaching.py
|
write_cv_teaching.py
|
py
| 3,742 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19788096058
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
from uuid import uuid4
import pygame
from .clock import Clock, clock
from .keyboard import Keyboard
from .screen import Screen
from .utils.event_dispatcher import EventDispatcher
if TYPE_CHECKING:
from .application import Application
class Scene(EventDispatcher):
"""
    The idea and the original code were taken from [EzPyGame](https://github.com/Mahi/EzPyGame)
An isolated scene which can be ran by an application.
Create your own scene by subclassing and overriding any methods.
Example:
```
class Menu(Scene):
def __init__(self):
self.font = pygame.font.Font(...)
def on_enter(self, previous_scene):
self.title = 'Main Menu'
self.resolution = (640, 480)
self.update_rate = 30
def draw(self, screen):
pygame.draw.rect(...)
text = self.font.render(...)
screen.blit(text, ...)
def handle_event(self, event):
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
game_size = self._get_game_size(event.pos)
self.change_scene(Game(game_size))
def _get_game_size(self, mouse_pos_upon_click):
...
class Game(pgz.Scene):
title = 'The Game!'
resolution = (1280, 720)
update_rate = 60
def __init__(self, size):
super().__init__()
self.size = size
self.player = ...
...
def on_enter(self, previous_scene):
super().on_enter(previous_scene)
self.previous_scene = previous_scene
def draw(self, screen):
self.player.draw(screen)
for enemy in self.enemies:
...
def update(self, dt):
self.player.move(dt)
...
if self.player.is_dead():
self.change_scene(self.previous_scene)
elif self.player_won():
self.change_scene(...)
def handle_event(self, event):
... # Player movement etc.
```
The above two classes use different approaches for changing
the application's settings when the scene is entered:
1. Manually set them in `on_enter`, as seen in `Menu`
2. Use class variables, as I did with `Game`
When using class variables (2), you can leave out any setting
(defaults to `None`) to not override that particular setting.
If you override `on_enter` in the subclass, you must call
`super().on_enter(previous_scene)` to use the class variables.
These settings can further be overridden in individual instances:
```
my_scene0 = MyScene()
my_scene0.resolution = (1280, 720)
my_scene1 = MyScene(title='My Second Awesome Scene')
```
Example:
        Shortcuts for event handling when subclassing `Scene`.
```
def on_mouse_up(self, pos, button):
# Override this for easier events handling.
pass
def on_mouse_down(self, pos, button):
# Override this for easier events handling.
pass
def on_mouse_move(self, pos):
# Override this for easier events handling.
pass
def on_key_down(self, key):
# Override this for easier events handling.
pass
def on_key_up(self, key):
# Override this for easier events handling.
pass
```
"""
_title: Optional[str] = None
_resolution: Optional[Tuple[int, int]] = None
_update_rate: Optional[int] = None
def __init__(self, title: Optional[str] = None, resolution=None, update_rate: Optional[int] = None) -> None:
self._application: Optional["Application"] = None
if title is not None:
self._title = title
if resolution is not None:
self._resolution = resolution
if update_rate is not None:
self._update_rate = update_rate
self._keyboard = Keyboard()
        # Client data is the data provided by the client during the handshake: usually things like player name, avatar, etc.
self._client_data: Dict[str, Any] = {}
# The scene UUID is used for communication
self._scene_uuid = str(uuid4())
@property
def scene_uuid(self) -> str:
"""
Get scene UUID.
"""
return self._scene_uuid
def set_client_data(self, client_data: Dict[str, Any]) -> None:
self._client_data = client_data
@property
def client_data(self) -> Dict[str, Any]:
"""
Get data provided by client side.
"""
return self._client_data
def change_scene(self, new_scene: Optional["Scene"]) -> None:
if not self._application:
raise Exception("Application was not configured properly.")
self._application.change_scene(new_scene)
@property
def title(self) -> str:
"""Get application title
Returns:
str: application title
"""
if not self._application:
raise Exception("Application was not configured properly.")
return self._application.title
@title.setter
def title(self, value: str) -> None:
"""Change application title
Args:
value (str): application title to set
"""
if not self._application:
print("Warning: application was not configured - 'title' setting was ignored")
return
self._application.title = value
@property
def resolution(self) -> Tuple[int, int]:
"""Get application screen resolution
Returns:
Tuple[int, int]: application screen resolution
"""
if not self._application:
raise Exception("Application was not configured properly.")
return self._application.resolution
@resolution.setter
def resolution(self, value: Tuple[int, int]) -> None:
"""Change application screen resolution
Args:
value (Tuple[int, int]): application screen resolution to use
"""
if not self._application:
print("Warning: application was not configured - 'resolution' setting was ignored")
return
self._application.resolution = value
@property
def update_rate(self) -> int:
"""Get application update rate
Returns:
int: application update rate
"""
if not self._application:
raise Exception("Application was not configured properly.")
return self._application.update_rate
@update_rate.setter
def update_rate(self, value: int) -> None:
"""Change application update rate
Args:
value (int): application update rate to set
"""
if not self._application:
print("Warning: application was not configured - 'update_rate' setting was ignored")
return
self._application.update_rate = value
@property
def clock(self) -> Clock:
"""
Get `Clock` object.
Actually returns the global clock object.
Returns:
Clock: clock object
"""
return clock
@property
def keyboard(self) -> Keyboard:
"""
Get `Keyboard` object.
Returns:
Keyboard: keyboard object
"""
return self._keyboard
def draw(self, screen: Screen) -> None:
"""
Override this with the scene drawing.
Args:
screen (Screen): screen to draw the scene on
"""
def update(self, dt: float) -> None:
"""
Override this with the scene update tick.
Args:
dt (float): time in milliseconds since the last update
"""
def handle_event(self, event: pygame.event.Event) -> None:
"""
Override this to handle an event in the scene.
All of `pygame`'s events are sent here, so filtering
should be applied manually in the subclass.
Args:
event (pygame.event.Event): event to handle
"""
if event.type == pygame.KEYDOWN:
self._keyboard._press(event.key)
elif event.type == pygame.KEYUP:
self._keyboard._release(event.key)
def on_enter(self, previous_scene: Optional["Scene"]) -> None:
"""
Override this to initialize upon scene entering.
If you override this method and want to use class variables
to change the application's settings, you must call
``super().on_enter(previous_scene)`` in the subclass.
Args:
previous_scene (Optional[Scene]): previous scene was running
"""
for attr in ("_title", "_resolution", "_update_rate"):
value = getattr(self, attr)
if value is not None:
if self._application is None:
print(f"Warning: application was not configured - '{attr}' setting was ignored")
continue
setattr(self._application, attr.lower(), value)
# Set event dispatcher
self.load_handlers()
def on_exit(self, next_scene: Optional["Scene"]) -> None:
"""
Override this to deinitialize upon scene exiting.
Args:
next_scene (Optional[Scene]): next scene to run
"""
|
kdeyev/pgz
|
pgz/scene.py
|
scene.py
|
py
| 9,429 |
python
|
en
|
code
| 4 |
github-code
|
6
|
24605950735
|
import logging
from Common import removeLinkID
class Group(object):
def __init__(self, DB):
self.DB = DB
def __call__(self, msg):
if not msg.isGroup():
msg.Reply("This function only for group.")
elif len(msg.args) != 1 or msg.args[0] not in ('this', 'all'):
msg.Reply("Invalid arguments.\nUse: /unlink group < this | all >")
else:
if msg.args[0] == 'this':
result = self.DB().Exec(
"""
SELECT link_id, COUNT(*)
FROM public.link_group
WHERE link_id IN (
SELECT link_id
FROM public.link_group
WHERE group_id = %s
AND user_id = %s
)
GROUP BY link_id;
""",
(
msg.GroupID(),
msg.UserID()
)
).Fetch()
if result:
link_id, count = result
if count == 1:
self.DB().Exec("DELETE FROM public.link_group WHERE link_id = %s;", [link_id])
removeLinkID(self.DB(), link_id)
else:
self.DB().Exec("DELETE FROM public.link_group WHERE group_id = %s;", [msg.GroupID()])
msg.Reply("Now unlinked.")
else:
msg.Reply("Your group not in linked or you not link creator.")
elif msg.args[0] == 'all':
result = self.DB().Exec(
"SELECT link_id FROM public.link_group WHERE group_id = %s AND user_id = %s GROUP BY link_id;",
(
msg.GroupID(),
msg.UserID()
)
).Fetch()
if result:
link_id = result[0]
self.DB().Exec("DELETE FROM public.link_group WHERE link_id = %s;", [link_id])
removeLinkID(self.DB(), link_id)
msg.Reply("Now all unlinked.")
else:
msg.Reply("Your group not in linked or you not link creator.")
|
hans00/MessageBot
|
Features/Unlink/Group.py
|
Group.py
|
py
| 1,624 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8629709747
|
from flask import Flask,request
app = Flask(__name__)
@app.route('/')
def home():
return "Bem-Vindo"
@app.route('/calculo')
def add():
a = 10
b = 10
return str(a+b)
if __name__ == '__main__':
app.run()
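# Usage sketch (assuming Flask's default development server on port 5000):
#   GET http://127.0.0.1:5000/         -> "Bem-Vindo"
#   GET http://127.0.0.1:5000/calculo  -> "20"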
|
kaibernu/MLDeploy
|
API.py
|
API.py
|
py
| 231 |
python
|
en
|
code
| 2 |
github-code
|
6
|
71361812987
|
import sys
import mysql.connector
from awsglue.utils import getResolvedOptions
params = [
'db_host',
'db_port',
'db_user',
'db_password',
'db_database',
'ticket_id_to_be_updated'
]
args = getResolvedOptions(sys.argv, params)
cnx = mysql.connector.connect(
host=args['db_host'],
port=args['db_port'],
user=args['db_user'],
password=args['db_password'],
database=args['db_database']
)
cur = cnx.cursor()
def update_data(cursor, connection):
ticket_id = args['ticket_id_to_be_updated']
print("Selecting one record from table {}".format("customer"))
cursor.execute("SELECT customer_id FROM customer ORDER BY RAND() LIMIT 1")
rows = cursor.fetchall()
customer_id = ""
for row in rows:
customer_id = row[0]
update_event = ("UPDATE ticket_activity SET purchased_by={}, updated_at=now() WHERE ticket_id={}".format(customer_id, ticket_id))
cursor.execute(update_event)
connection.commit()
def read_data(cursor):
cursor.execute("SELECT * FROM ticket_activity")
rows = cursor.fetchall()
for row in rows:
print(row)
read_data(cur)
update_data(cur, cnx)
read_data(cur)
cur.close()
cnx.close()
|
bhavik161/studio
|
rds/rds_upsert_data.py
|
rds_upsert_data.py
|
py
| 1,200 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22020962951
|
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
import rich
import seaborn as sns
import typer
from boiling_learning.app.configuration import configure
from boiling_learning.app.datasets.bridged.boiling1d import DEFAULT_BOILING_OUTLIER_FILTER
from boiling_learning.app.datasets.preprocessed.boiling1d import boiling_datasets
from boiling_learning.app.paths import studies_path
from boiling_learning.app.training.boiling1d import DEFAULT_BOILING_HEAT_FLUX_TARGET
from boiling_learning.datasets.sliceable import targets
from boiling_learning.image_datasets import ImageDatasetTriplet
from boiling_learning.lazy import LazyDescribed
from boiling_learning.utils.pathutils import resolve
app = typer.Typer()
console = rich.console.Console()
@app.command()
def boiling1d() -> None:
configure(
force_gpu_allow_growth=True,
use_xla=True,
require_gpu=True,
)
datasets = boiling_datasets(direct_visualization=True)
f, axes = plt.subplots(len(datasets), 1, figsize=(6, 4))
for index, (ax, dataset) in enumerate(zip(axes, datasets)):
data = _sorted_boiling_datasets(dataset)
sns.scatterplot(
ax=ax,
data=data,
x='index',
y='heat flux',
hue='class',
alpha=0.5,
)
ax.set_title(f'Dataset {index}')
f.savefig(str(_data_split_study_path() / 'boiling1d.pdf'))
@app.command()
def condensation(
each: int = typer.Option(60),
normalize: bool = typer.Option(...),
) -> None:
raise NotImplementedError
def _sorted_boiling_datasets(datasets: LazyDescribed[ImageDatasetTriplet]) -> pd.DataFrame:
ds_train, ds_val, ds_test = datasets()
df = pd.DataFrame(
sorted(
(
(
target['nominal_power'],
target[DEFAULT_BOILING_HEAT_FLUX_TARGET],
target['elapsed_time'],
class_name,
)
for class_name, ds in (
('train', ds_train),
('val', ds_val),
('test', ds_test),
)
for target in targets(ds).prefetch(1024)
if DEFAULT_BOILING_OUTLIER_FILTER()(None, target)
),
key=lambda power_hf_et_class: (
power_hf_et_class[0],
power_hf_et_class[2],
),
),
columns=['nominal power', 'heat flux', 'elapsed time', 'class'],
)
df['index'] = range(len(df))
return df
def _data_split_study_path() -> Path:
return resolve(studies_path() / 'data-split', dir=True)
|
ruancomelli/boiling-learning
|
boiling_learning/app/studies/data_split.py
|
data_split.py
|
py
| 2,677 |
python
|
en
|
code
| 7 |
github-code
|
6
|
71365190588
|
import torch
from torchvision import transforms
from torch.autograd import Variable
from dataset import DatasetFromFolder
from model import Generator
import utils
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=False, default='facades', help='input dataset')
parser.add_argument('--direction', required=False, default='BtoA', help='input and target image order')
parser.add_argument('--batch_size', type=int, default=1, help='test batch size')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--input_size', type=int, default=1024, help='input size')
params = parser.parse_args()
print(params)
# Directories for loading data and saving results
data_dir = '../Data/' + params.dataset + '/'
save_dir = params.dataset + '_test_results/'
model_dir = params.dataset + '_model/'
if not os.path.exists(save_dir):
os.mkdir(save_dir)
if not os.path.exists(model_dir):
os.mkdir(model_dir)
# Data pre-processing
test_transform = transforms.Compose([transforms.Resize(params.input_size),  # Scale was renamed Resize in modern torchvision
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
# Test data
test_data = DatasetFromFolder(data_dir, subfolder='test', direction=params.direction, transform=test_transform)
test_data_loader = torch.utils.data.DataLoader(dataset=test_data,
batch_size=params.batch_size,
shuffle=False)
# Load model
G = Generator(3, params.ngf, 3)
G.cuda()
G.load_state_dict(torch.load(model_dir + 'generator_param.pkl'))
# Test
for i, (input, target) in enumerate(test_data_loader):
# input & target image data
x_ = Variable(input.cuda())
y_ = Variable(target.cuda())
gen_image = G(x_)
gen_image = gen_image.cpu().data
# Show result for test data
utils.plot_test_result(input, target, gen_image, i, training=False, save=True, save_dir=save_dir)
print('%d images are generated.' % (i + 1))
|
togheppi/pix2pix
|
pix2pix_test.py
|
pix2pix_test.py
|
py
| 2,081 |
python
|
en
|
code
| 46 |
github-code
|
6
|
44938119316
|
import numpy as np
import redis
import struct
import cv2
import time
import curved_paths_coords as pc
from threading import Thread
r = redis.Redis(host='192.168.0.101', port=6379, db=0)
log_sensing_running =\
log_navigation_running =\
log_batterymeter_running =\
log_driving_running =\
log_detect_cam =\
voltages1_and_2 =\
log_sensing_time=\
log_target_distance_angle=\
log_path=\
log_path_min_cost=\
log_current_speed=\
log_in_front_of_car=\
log_uptime =\
path_received =\
received_target_coords = None
mapW = 400
mapH = 400
last_time=0
font = cv2.FONT_HERSHEY_SIMPLEX
map_refresh = 0.1 # interval between map refresh
map = np.full((mapW,mapH,3),100, np.uint8)
def redis_to_map(redis,name):
encoded = redis.get(name)
if encoded is None:
return np.full((mapW,mapH,3),100, np.uint8)
else:
h, w = struct.unpack('>II', encoded[:8])
array = np.frombuffer(encoded, dtype=np.uint8, offset=8).reshape(h, w, 1)
array = cv2.cvtColor(array,cv2.COLOR_GRAY2RGB)
return array
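# Encoding counterpart (assumed; the producer is not shown here): the map is
# expected to be stored as a big-endian height/width header followed by the
# raw grayscale bytes, matching the unpacking above:
#   encoded = struct.pack('>II', h, w) + gray.tobytes()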
def update_data():
    # display_data() reads these names at module level, so declare them
    # global here; otherwise every assignment below would create a local
    # that is silently discarded when the function returns.
    global log_sensing_time, log_target_distance_angle, log_path, \
        log_path_min_cost, log_current_speed, log_in_front_of_car, \
        voltages1_and_2, log_uptime, log_sensing_running, \
        log_navigation_running, log_batterymeter_running, \
        log_driving_running, log_detect_cam, map, path_received, \
        received_target_coords
log_sensing_time_received = r.get('log_sensing_time')
if log_sensing_time_received is not None:
log_sensing_time = round(float(log_sensing_time_received),2)
else:
log_sensing_time = 0
log_target_distance_received = r.get('log_target_distance')
if log_target_distance_received is not None:
log_target_distance = round(float(log_target_distance_received),2)
else:
log_target_distance = "None"
log_target_angle_received = r.get('log_target_angle')
if log_target_angle_received is not None:
log_target_angle = round(float(log_target_angle_received),2)
else:
log_target_angle = "None"
log_target_distance_angle = str(log_target_distance) + " " + str(log_target_angle)
log_path_received = r.get('path')
if log_path_received is not None:
log_path = float(log_path_received)
else:
log_path = "None"
log_path_min_cost_received = r.get('path_min_cost')
if log_path_min_cost_received is not None:
log_path_min_cost = round(float(log_path_min_cost_received),2)
else:
log_path_min_cost = "None"
log_current_speed_received = r.get('current_speed')
if log_current_speed_received is not None:
log_current_speed = round(float(log_current_speed_received),2)
else:
log_current_speed = "None"
log_in_front_of_car_received = r.get('log_in_front_of_car')
if log_in_front_of_car_received is not None:
log_in_front_of_car = float(log_in_front_of_car_received)
else:
log_in_front_of_car = "None"
voltages_received = r.get('voltages')
if voltages_received is not None:
voltages = np.round(np.array(struct.unpack('%sf' %2, voltages_received)),2)
else:
voltages = [0,0]
voltages1_and_2 = str(voltages[0]) + " " + str(voltages[1])
log_uptime_received = r.get('log_uptime')
if log_uptime_received is not None:
log_uptime = int(float(log_uptime_received))
else:
log_uptime = "None"
log_sensing_running_received = r.get('log_sensing_running')
if log_sensing_running_received is not None:
log_sensing_running = str(log_sensing_running_received.decode("utf-8") )
else:
log_sensing_running = "off"
log_navigation_running_received = r.get('log_navigation_running')
if log_navigation_running_received is not None:
log_navigation_running = str(log_navigation_running_received.decode("utf-8") )
else:
log_navigation_running = "off"
log_batterymeter_running_received = r.get('log_batterymeter_running')
if log_batterymeter_running_received is not None:
log_batterymeter_running = str(log_batterymeter_running_received.decode("utf-8") )
else:
log_batterymeter_running = "off"
log_driving_running_received = r.get('log_driving_running')
if log_driving_running_received is not None:
log_driving_running = str(log_driving_running_received.decode("utf-8") )
else:
log_driving_running = "off"
log_detect_cam_received = r.get('log_detect_cam')
if log_detect_cam_received is not None:
log_detect_cam = str(log_detect_cam_received.decode("utf-8") )
else:
log_detect_cam = "None"
map = redis_to_map(r, "map")
path_received = r.get('path')
received_target_coords = r.get('target_car_coords')
def display_data():
cv2.rectangle(map,(187,242),(213,305),(0, 100, 255),-1) #draw car
visible_cone = np.array([[213, 242], [187, 242], [0, 0], [400, 0]], np.int32)
visible_cone = visible_cone.reshape((-1, 1, 2))
cv2.polylines(map, [visible_cone], True, (255,255,255), 1)
color_path = (0,255,0)
if path_received is None:
pass
elif int(path_received) == -1:
pass
else:
path = int(path_received)
if path > 5:
path_lookup = path - 5
l = -1
else:
path_lookup = path
l = 1
for square in range(0, 4):
#print(path,square)
x0 = int(l * pc.paths[path_lookup]['coords'][square][0] / 10 + mapW / 2)
y0 = mapH - int(pc.paths[path_lookup]['coords'][square][1] / 10 + 150)
x1 = int(l * pc.paths[path_lookup]['coords'][square][2] / 10 + mapW / 2)
y1 = mapH - int(pc.paths[path_lookup]['coords'][square][3] / 10 + 150)
x2 = int(l * pc.paths[path_lookup]['coords'][square + 1][0] / 10 + mapW / 2)
y2 = mapH - int(pc.paths[path_lookup]['coords'][square + 1][1] / 10 + 150)
x3 = int(l * pc.paths[path_lookup]['coords'][square + 1][2] / 10 + mapW / 2)
y3 = mapH - int(pc.paths[path_lookup]['coords'][square + 1][3] / 10 + 150)
poly = np.array([[x0,y0],[x1,y1],[x3,y3],[x2,y2]])
poly = poly.reshape((-1, 1, 2))
cv2.polylines(map,[poly],True,(255,255,255),1)
if received_target_coords is not None:
target_car_coords = np.array(struct.unpack('%sf' %3, received_target_coords))
mx = int(target_car_coords[0] * 100 + mapW / 2)
my = int(mapH - target_car_coords[2] * 100)
cv2.line(map, (int(mapW/2), mapH - 150), (mx, my - 150), (0,0,255), thickness=3)
topic_left=['sensing', \
'navigation',\
'batterymeter',\
'driving',\
'detect cam',\
]
logs_left=[log_sensing_running, \
log_navigation_running,\
log_batterymeter_running,\
log_driving_running,\
log_detect_cam\
]
topic_right=['battery voltages', \
'sensing time',\
'target dist, angle',\
'current path',\
'path min cost',\
'current speed',\
'obstacle height',\
'uptime'\
]
logs_right=[voltages1_and_2, \
log_sensing_time,\
log_target_distance_angle,\
log_path,\
log_path_min_cost,\
log_current_speed,\
log_in_front_of_car,\
log_uptime\
]
count = 1
for text in topic_left:
count +=1
cv2.putText(map, str(text), (20, 300 + 10 * count), font, 0.4, (255,255,255), 1)
count = 1
for text in logs_left:
count +=1
cv2.putText(map, str(text), (140, 300 + 10 * count), font, 0.4, (255,255,255), 1)
count = 1
for text in topic_right:
count +=1
cv2.putText(map, str(text), (187, 300 + 10 * count), font, 0.4, (255,255,255), 1)
count = 1
for text in logs_right:
count +=1
cv2.putText(map, str(text), (310, 300 + 10 * count), font, 0.4, (255,255,255), 1)
def try_to_connect():
    # connected_to_redis is checked in display(), so it must be declared
    # global; without this, the assignment at the end of this function
    # would only create a local variable.
    global connected_to_redis
while True:
try:
cv2.namedWindow('map', cv2.WINDOW_NORMAL)
display_data()
cv2.imshow('map', map)
key = cv2.waitKey(1)
if key & 0xFF == ord('q') or key == 27:
cv2.destroyAllWindows()
break
r.ping()
except redis.exceptions.ConnectionError as e:
print("retrying connection")
cv2.putText(map, "No connection to car", (35, 150), font, 1, (0,0,255), 3)
continue
else:
break
print("connected")
connected_to_redis = True
connected_to_redis = False
def display():
global connected_to_redis
while True:
try:
update_data()
print("here")
cv2.namedWindow('map', cv2.WINDOW_NORMAL)
display_data()
cv2.imshow('map', map)
if not connected_to_redis:
try_to_connect()
time.sleep(map_refresh)
except redis.exceptions.ConnectionError as e:
try_to_connect()
x = Thread(target=display, args=())
x.start()  # the thread was created but never started; without this the script exits immediately
|
julianx4/skippycar
|
test.py
|
test.py
|
py
| 8,784 |
python
|
en
|
code
| 3 |
github-code
|
6
|
33273926923
|
from PDBParseBase import PDBParserBase #get_site_header_seq_info
import time, os,datetime,logging,gzip,pickle #get_site_header_seq_info
def mkdir(path):
    # Create the folder if it does not already exist
    isExists=os.path.exists(path)
    if not isExists:
        os.makedirs(path)
        print(path + " Created folder successfully!")
        return True
    else:
        #print("this path already exists")
        return False
def get_site_header_seq_info(rootdir,savefilepath):
"""extract header\sequence\site\remark800 info in rootdir.
and then, save them as a pickle content with list[1_site,2_header,3_sequence]
rootdir = "/home/RaidDisk/pdbfiles/updb"
savefilepath = "/home/zhaiyh884/20190614_new_data/0615_data"
scan all pdb files need about 60 min.
"""
count = 0
counter_mem = 0
pdbbase = PDBParserBase()
pdb_seq_info = pdbbase.get_sequence_fromATOM('/home/RaidDisk/pdbfiles/updb/pdb/a2/pdb2a2q.ent')
print(pdb_seq_info)
"""
#test cetern item
pdb_header_info = pdbbase.get_header_info('/home/RaidDisk/pdbfiles/updb/pdb/a2/pdb2a2q.ent')
pdb_site_info = pdbbase.get_site_info('/home/RaidDisk/pdbfiles/updb/pdb/a2/pdb2a2q.ent')
pdb_seq_info = pdbbase.get_sequence_fromSEQ('/home/RaidDisk/pdbfiles/updb/pdb/a2/pdb2a2q.ent')
print(pdb_header_info)
print(pdb_site_info)
print(pdb_seq_info) """
for parent,dirnames,filenames in os.walk(rootdir):
for filename in filenames:
data_3_items = []
            # analyze the data
pdb_site_info = pdbbase.get_site_info(os.path.join(parent,filename))
data_3_items.append(pdb_site_info)
            if not pdb_site_info:
                print("no site info: " + filename)
                # skip files without SITE records to save some time
                continue
pdb_header_info = pdbbase.get_header_info(os.path.join(parent,filename))
data_3_items.append(pdb_header_info)
#print(pdb_header_info)
pdb_seq_from_SEQ__info = pdbbase.get_sequence_fromSEQ(os.path.join(parent,filename))
data_3_items.append(pdb_seq_from_SEQ__info)
print("pdb_seq_from_SEQ__info")
print(pdb_seq_from_SEQ__info)
pdb_seq_from_ATOM_info = pdbbase.get_sequence_fromATOM(os.path.join(parent,filename))
data_3_items.append(pdb_seq_from_ATOM_info)
print("pdb_seq_from_ATOM_info")
print(pdb_seq_from_ATOM_info)
#save data
if not pdb_site_info :
pass
else:
dirname = filename[4:6]
new_Filepath = savefilepath +"/" + str(dirname)+"/"
mkdir(new_Filepath)
new_filename = filename[3:7] + ".pickle"
with open(new_Filepath + new_filename,"wb") as dbFile:
pickle.dump(data_3_items,dbFile)
"""with open(new_Filepath + new_filename,"rb") as dbFile:
file = pickle.load(dbFile) """
pass
pass
def find_memberain_protein(rootdir,savefilepath):
#find all protein that header have "mem"in it and save them into savefilepath
count = 0
counter_mem = 0
pdbbase = PDBParserBase()
pdb_header_info = {}
for parent,dirnames,filenames in os.walk(rootdir):
for filename in filenames:
count = count + 1
dirname = filename[3:7]
pdb_header_info = pdbbase.get_header_info(os.path.join(parent,filename))
if "MEM" in pdb_header_info["HEADER_classification"]:
counter_mem = counter_mem + 1
cmd = 'cp ' + str(os.path.join(parent,filename)) + ' ' + str(os.path.join(savefilepath,filename))
os.system(cmd)
pass
def find_all_sites(rootdir):
# use the pickles that contain header\sequence\site\remark800 info
#rootdir = "/home/zhaiyh884/20190614_new_data/0615_data"
#
total_site = []
description_null = 0
for parent, dirnames, filenames in os.walk(rootdir):
for filename in filenames:
# walk into one file
pro_id = filename[0:4]
file_path = os.path.join(parent, filename)
with open(file_path, "rb") as dbFile:
file = pickle.load(dbFile)
# print(file)
site = file[0]
header = file[1]
seq = file[2]
item_dict = {}
for item in site:
#{'1KMH_A': [{'position': {'51': 'G', '65': 'L', '131': 'E', '274': 'M', '297': 'R'}, 'site_description': 'RESIDUE TTX B 499'}],
# '1KMH_B': [{'position': {'81': 'A', '82': 'T', '83': 'D'}, 'site_description': 'RESIDUE TTX B 499'}]}
#in this loop item means protein_sequence name: 1KMH_A
sites = []
for site_item in site[item]:
#site_item means every record in one sequence
description = site_item["site_description"]
descriptions = description.split()
try:
#find the binding name
if "res" in descriptions[1]:
sites.append(descriptions[2])
else:
sites.append(descriptions[1])
except IndexError:
description_null = description_null+1
item_dict[item] = sites
total_site.append(item_dict)
# print(total_site)
print("len(membrane_total_site):")
print(len(total_site))
print("null_discription:")
print(description_null)
with open("/home/zhaiyh884/20190614_new_data/total_site.pickle", "wb") as dbFile:
pass
pickle.dump(total_site, dbFile)
pass
def count_sites(file):
    # used to count sites and analyze the data.
    #file = "/home/zhaiyh884/20190614_new_data/total_site.pickle"
    # scan all the data and count every site's appearance
with open(file, "rb") as dbFile:
file = pickle.load(dbFile)
site_dicts = {}
total_site_num = 0
for item in file:
for seq_id in item:
#print(seq_id)
for site_name in item[seq_id]:
print(site_name)
site_dicts[site_name] = site_dicts[site_name] + 1 if site_name in site_dicts else 1
total_site_num = total_site_num + 1
#if site_name in site_dicts.keys():
# site_dicts[site_name] = site_dicts[site_name] + 1 if site_name in site_dicts else 1
#print(site_name)
with open("/home/zhaiyh884/20190614_new_data/site_numbers.pickle", "wb") as dbFile:
pickle.dump(site_dicts, dbFile)
print(site_dicts)
print("total_site_num:")
print(total_site_num)
print("site_dicts items num:")
print(len(site_dicts))
def sites_anylize():
with open("site_numbers.pickle","rb") as dbFile:
file = pickle.load(dbFile)
with open("hetlist.pickle","rb") as dbFile_drug:
file_drug = pickle.load(dbFile_drug)
sites_number = 0
number_counter = {}
drug_site = {}
for site_name in file:
if site_name in file_drug:
sites_number = sites_number + file[site_name]
# used to count all numbers of drugs_binding object
number_of_site = file[site_name]
# number_of_site used to sign the numbers which apperence
number_counter[number_of_site] = number_counter[number_of_site] + 1 if number_of_site in number_counter else 1
# the dict to store the number of times
drug_site[site_name] = file[site_name]
#form a new site of drug sites
print(sites_number)
print(number_counter)
print(sorted(file.items(),key=lambda x:x[1]))
print("#@!#!$!@#%!#%")
print(sorted(drug_site.items(),key=lambda x:x[1]))
pass
def find_memberain_sites(rootdir):
# use the pickles that contain header\sequence\site\remark800 info
#rootdir = "/home/zhaiyh884/20190614_new_data/0615_data"
#
total_site = []
description_null = 0
for parent, dirnames, filenames in os.walk(rootdir):
for filename in filenames:
# walk into one file
pro_id = filename[0:4]
file_path = os.path.join(parent, filename)
with open(file_path, "rb") as dbFile:
file = pickle.load(dbFile)
# print(file)
site = file[0]
header = file[1]
seq = file[2]
#select membrane protein
if "MEM" not in header["HEADER_classification"]:
continue
# use site info only
item_dict = {}
for item in site:
#{'1KMH_A': [{'position': {'51': 'G', '65': 'L', '131': 'E', '274': 'M', '297': 'R'}, 'site_description': 'RESIDUE TTX B 499'}],
# '1KMH_B': [{'position': {'81': 'A', '82': 'T', '83': 'D'}, 'site_description': 'RESIDUE TTX B 499'}]}
#in this loop item means protein_sequence name: 1KMH_A
sites = []
for site_item in site[item]:
#site_item means every record in one sequence
description = site_item["site_description"]
descriptions = description.split()
try:
#find the binding name
if "res" in descriptions[1]:
sites.append(descriptions[2])
else:
sites.append(descriptions[1])
except IndexError:
description_null = description_null+1
item_dict[item] = sites
total_site.append(item_dict)
# print(total_site)
print("len(membrane_total_site):")
print(len(total_site))
print("null_discription:")
print(description_null)
with open("/home/zhaiyh884/20190614_new_data/membrane_total_site.pickle", "wb") as dbFile:
pass
pickle.dump(total_site, dbFile)
pass
def find_drug_releated_protein(rootdir):
# use the pickles that contain header\sequence\site\remark800 info
#rootdir = "/home/zhaiyh884/20190614_new_data/0615_data"
#find the difference between proteins class and drug-releated-proteins class
with open("hetlist.pickle","rb") as dbFile_drug:
file_drug = pickle.load(dbFile_drug)
protein_classfication = []
drug_protein_classfication = []
protein_dict = {}
drug_releated_protein_dict = {}
description_null = 0
for parent, dirnames, filenames in os.walk(rootdir):
for filename in filenames:
# walk into one file
pro_id = filename[0:4]
file_path = os.path.join(parent, filename)
with open(file_path, "rb") as dbFile:
file = pickle.load(dbFile)
# print(file)
site = file[0]
header = file[1]
seq = file[2]
classification = header["HEADER_classification"]
protein_dict[classification] = protein_dict[classification] + 1 if classification in protein_dict else 1
#print(protein_dict)
drug_releated_protein_flag = 0
for item in site:
#{'1KMH_A': [{'position': {'51': 'G', '65': 'L', '131': 'E', '274': 'M', '297': 'R'}, 'site_description': 'RESIDUE TTX B 499'}],
# '1KMH_B': [{'position': {'81': 'A', '82': 'T', '83': 'D'}, 'site_description': 'RESIDUE TTX B 499'}]}
#in this loop item means protein_sequence name: 1KMH_A
sites = []
for site_item in site[item]:
#site_item means every record in one sequence
description = site_item["site_description"]
descriptions = description.split()
try:
#find the binding_object name
if "res" in descriptions[1]:
binding_object = descriptions[2]
elif "RESIDUES" in description and "THROUGH" in description:
binding_object = descriptions[2]
else:
binding_object = descriptions[1]
except IndexError:
    description_null = description_null + 1
    continue  # binding_object was not parsed for this record; skip it
#sites.append(binding_object)
if binding_object in file_drug:
drug_releated_protein_flag = 1
#print(drug_releated_protein_flag)
if drug_releated_protein_flag ==1:
drug_releated_protein_dict[classification] = drug_releated_protein_dict[classification] + 1 if classification in drug_releated_protein_dict else 1
"""item_dict[item] = sites
total_site.append(item_dict)
# print(total_site)
print("len(membrane_total_site):")
print(len(total_site))
print("null_discription:")
print(description_null)"""
print(protein_dict)
print("!@#$!@#################$!@%#!$^%$#@^$%&^#$%&")
print(drug_releated_protein_dict)
with open("/home/zhaiyh884/20190614_new_data/drug_and_nondrug_protein_classfication.pickle", "wb") as dbFile:
pickle.dump(protein_dict, dbFile)
pickle.dump(drug_releated_protein_dict, dbFile)
pass
if __name__ == "__main__":
start = datetime.datetime.now()
#1 extract all needed information from pdb
rootdir = "/home/RaidDisk/pdbfiles/updb"
savefilepath = "/home/zhaiyh884/20190614_new_data/0615_data"
get_site_header_seq_info(rootdir,savefilepath)
#rootdir = "/home/zhaiyh884/20190614_new_data/0615_data"
#2 find_all_site
#find_all_sites(rootdir)
#2 or find_memberain_sites
#find_memberain_sites(rootdir)
#3 count site numbers
#file = "/home/zhaiyh884/20190614_new_data/membrane_total_site.pickle"
#file = "/home/zhaiyh884/20190614_new_data/total_site.pickle"
#count_sites(file)
#4
#sites_anylize()
#5
#rootdir = "/home/zhaiyh884/20190614_new_data/0615_data"
#find_drug_releated_protein(rootdir)
end = datetime.datetime.now()
print("alltime = ")
print (end-start)
|
Rio56/deeplearning
|
DTP_deeplearning/0618_ๆฐๆฐๆฎๅค็ไปฃ็ ๅๆไปถ/drug_target_data_0617.py
|
drug_target_data_0617.py
|
py
| 15,010 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22635248553
|
class Tree:
def __init__(self, height):
self.height = height
self.visible = False
def __repr__(self):
return str(self.visible)
trees = []
with open('8.input') as f:
lines = f.readlines()
for line in lines:
treeline = []
for char in line.strip():
treeline.append(Tree(int(char)))
trees.append(treeline)
for treerow in trees:
max_height = -1
for tree in treerow:
if tree.height > max_height:
tree.visible = True
max_height = tree.height
for treerow in trees:
max_height = -1
for tree in reversed(treerow):
if tree.height > max_height:
tree.visible = True
max_height = tree.height
for index in range(len(trees[0])):  # sweep every column top-down
max_height = -1
for treerow in trees:
tree = treerow[index]
if tree.height > max_height:
tree.visible = True
max_height = tree.height
for index in range(len(trees[0])):
max_height = -1
for treerow in reversed(trees):
tree = treerow[len(trees[0])-1-index]
if tree.height > max_height:
tree.visible = True
max_height = tree.height
visible = 0
for treerow in trees:
for tree in treerow:
if tree.visible:
visible += 1
print (visible)
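# Equivalent column sweeps (untested sketch): zip(*trees) yields columns, so
# the two index-based loops above could also be written as
#
#     for column in zip(*trees):
#         max_height = -1
#         for tree in column:  # top-down; use reversed(column) for bottom-up
#             if tree.height > max_height:
#                 tree.visible = True
#                 max_height = tree.height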
|
mouseboks/AoC2022
|
8.py
|
8.py
|
py
| 1,346 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40319402697
|
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.api import \
Session
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.cls import GeneralModule
class General(GeneralModule):
CMDS = {
'set': 'set',
'search': 'get',
}
API_KEY_PATH = 'bgp'
API_MOD = 'quagga'
API_CONT = 'bgp'
API_CONT_REL = 'service'
API_CMD_REL = 'reconfigure'
FIELDS_CHANGE = [
'as_number', 'id', 'graceful', 'enabled', 'networks',
'redistribute',
]
FIELDS_ALL = FIELDS_CHANGE
FIELDS_TRANSLATE = {
'as_number': 'asnumber',
'id': 'routerid',
}
FIELDS_TYPING = {
'bool': ['enabled', 'graceful'],
'list': ['networks', 'redistribute'],
}
INT_VALIDATIONS = {
'as_number': {'min': 1, 'max': 4294967295},
}
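# Note: FIELDS_TRANSLATE maps this module's option names to the OPNsense API
# field names (e.g. 'as_number' is sent as 'asnumber'); FIELDS_TYPING tells
# the base GeneralModule how to coerce each field's type, and INT_VALIDATIONS
# bounds 'as_number' to the valid 32-bit ASN range (1..4294967295).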
def __init__(self, module: AnsibleModule, result: dict, session: Session = None):
GeneralModule.__init__(self=self, m=module, r=result, s=session)
|
ansibleguy/collection_opnsense
|
plugins/module_utils/main/frr_bgp_general.py
|
frr_bgp_general.py
|
py
| 1,066 |
python
|
en
|
code
| 158 |
github-code
|
6
|
20861131743
|
import requests
import os
import wget
import subprocess
def update_mindustry():
global response
global be_wrapper
global current_build
download_url = "https://github.com/Anuken/MindustryBuilds/releases/download/" + str(current_build)
download_url = download_url + "/Mindustry-BE-Desktop-" + str(current_build) + ".jar"
os.system("rm -f " + os.path.join(be_wrapper, "Mindustry.jar"))
wget.download(download_url, os.path.join(be_wrapper, "Mindustry.jar"))
with open(be_wrapper + "/last.txt", "w") as bfile:
    bfile.write(str(current_build))
print()
def run_mindustry():
global be_wrapper
global current_build
if not os.path.exists(os.path.join(be_wrapper, "Mindustry.jar")):
print("The Mindustry jar file does not exist. Download it now?")
if input("Update now? (Y/N):").lower() == "y":
update_mindustry()
else:
print("Exiting")
exit(0)
os.system("java -jar " + be_wrapper + "/Mindustry.jar")
try:
subprocess.check_call("java -version", shell=True)
except subprocess.CalledProcessError as x:
if x.returncode != 127:
raise
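# A shell exit status of 127 conventionally means "command not found", so the
# check above re-raises for any other failure of "java -version" but lets a
# missing java binary fall through (presumably so the jar download can still
# proceed).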
response = requests.get("https://api.github.com/repos/Anuken/MindustryBuilds/releases/latest").json()
current_build = int(response['tag_name'])
home = os.path.expanduser("~")
be_wrapper = os.path.join(home, "BEWrapper")
if not os.path.exists(be_wrapper):
os.mkdir(be_wrapper)
try:
build_file = open(be_wrapper + "/last.txt", "r")
saved_build = int(build_file.read())
build_file.close()
except FileNotFoundError:
saved_build = 0
except ValueError:
saved_build = 0
if saved_build < current_build:
    print("Your Mindustry build seems to be out of date by " + str(current_build - saved_build) + " releases.")
    if input("Update now? (Y/N):").lower() == "y":
        update_mindustry()
else:
    print("Mindustry appears to be up to date!")
print("Running Mindustry")
run_mindustry()
|
ILiekMelons/MindustryBELauncher
|
main.py
|
main.py
|
py
| 1,958 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1639327865
|
# coding=utf-8
"""
Controller for the pre-processing and creation of the trainable data sets.
"""
from sequenceprocessjson import process_json
from os.path import join
from json import load
# File and directory locations.
DATA_DIR = "data"
VIDEO_FILE = "YoutubeVideos"
TRAINING_MODIFIER = "_training"
VALIDATION_MODIFIER = "_validation"
VIDEO_DIR = "videos"
FRAMES_DIR = "frames"
JSON_DIR = "json"
OUTPUT_DIR = "output"
MODIFIERS = [TRAINING_MODIFIER, VALIDATION_MODIFIER]
for modifier in MODIFIERS:
# Paths
videos = join(DATA_DIR, VIDEO_FILE + modifier)
videos_path = join(DATA_DIR, VIDEO_DIR + modifier)
frames_path = join(DATA_DIR, FRAMES_DIR + modifier)
json_path = join(DATA_DIR, JSON_DIR + modifier)
output_path = join(DATA_DIR, OUTPUT_DIR + modifier)
# Process the Json files into Trainable Normalized vectors
process_json(json_path, output_path)
|
joshbrun/ExerciseDataCollection
|
Modelling/LSTM/controller.py
|
controller.py
|
py
| 892 |
python
|
en
|
code
| 2 |
github-code
|
6
|
1478963521
|
import numpy, math, itertools
from hashlib import sha1
from mbfit.exceptions import XYZFormatError, InvalidValueError, InconsistentValueError
from .fragment import Fragment
class Molecule(object):
"""
Stores the fragments of a Molecule
"""
def __init__(self, fragments):
"""
Creates a new Molecule
Args:
None
Returns:
A new Molecule
"""
# list of fragments in this molecule
self.fragments = []
for fragment in fragments:
self.add_fragment(fragment)
# list of energies for this molecule, filled in by get_nmer_energies
self.energies = {}
# list of nmer_energies for this molecule, filled by get_nmer_energies
self.nmer_energies = []
self.mb_energies = []
def get_name(self):
"""
Gets the name of this molecule, consists of the names of the fragments in standard order connected by a dash '-'
Args:
None
Returns:
The name of this molecule
"""
return "-".join([fragment.get_name() for fragment in self.get_fragments()])
def get_symmetry(self):
"""
Gets the symmetry of this molecule
Args:
None
Returns:
The symmetry of this molecule in A1B2_C1D1E1 form
"""
# used to assemble the symmetry string
try:
symmetry = self.get_fragments()[0].get_symmetry()
except IndexError:
# if there are no fragments, symmetry is empty string
return ""
# add each fragment's symmetry to the string
for fragment in self.get_fragments()[1:]:
symmetry += "_" + fragment.get_symmetry()
return symmetry
def add_fragment(self, fragment):
"""
Adds a fragment to this molecule
Args:
fragment - the fragment to add
Returns:
None
"""
# make sure the symmetry class of the atoms in this fragment doesn't violate the 1 symmetry class -> 1 atom type rule
for existing_fragment in self.get_fragments():
if fragment.get_name() == existing_fragment.get_name():
for atom_new, atom_old in zip(fragment.get_atoms(), existing_fragment.get_atoms()):
if atom_new.get_name() != atom_old.get_name():
raise InconsistentValueError("name of atom {} from one {} fragment".format(atom_old.get_name(), existing_fragment.get_name()),
"name of atom {} from another {} fragment".format(atom_new.get_name(), fragment.get_name()),
atom_old.get_name(),
atom_new.get_name(),
"atoms in fragments with the same name must have the same names in the same order.")
if atom_new.get_symmetry_class() != atom_old.get_symmetry_class():
raise InconsistentValueError("symmetry class of atom {} from one {} fragment".format(atom_old.get_name(), existing_fragment.get_name()),
"symmetry class of atom {} from another {} fragment".format(atom_new.get_name(), fragment.get_name()),
atom_old.get_symmetry_class(),
atom_new.get_symmetry_class(),
"atoms in fragments with the same name must have the same symmetry classes in the same order.")
else:
for atom_new in fragment.get_atoms():
for atom_old in existing_fragment.get_atoms():
if atom_new.get_symmetry_class() == atom_old.get_symmetry_class():
raise InconsistentValueError("symmetry class of atom {} from {} fragment".format(atom_old.get_name(), existing_fragment.get_name()),
"symmetry class of atom {} from {} fragment".format(atom_new.get_name(), fragment.get_name()),
atom_old.get_symmetry_class(),
atom_new.get_symmetry_class(),
"atoms in fragments with different names cannot be equivelent and should not have the same symmetry class.")
self.fragments.append(fragment)
return
def get_fragments(self):
"""
Gets a list of the fragments in this molecule in standard order
Args:
None
Returns:
List of fragments in this molecule in standard order
"""
return self.fragments
def get_atoms(self):
"""
Gets a list of the atoms in this molecule in standard order
fragments are first sorted into standard order, and then atoms within those fragments are put in their standard order.
Args:
None
Returns:
List of atoms in this molecule in standard order
"""
atoms = []
for fragment in self.get_fragments():
atoms += fragment.get_atoms()
return atoms
def get_charge(self, fragments = None):
"""
Gets the charge of this molecule by summing the charges of its fragments
Args:
fragments - list of fragment indices; if specified, only get the charge of these fragments, default is to include all fragments
Returns:
Sum charge of all or some of the fragments of this molecule
"""
if fragments is None:
fragments = range(len(self.get_fragments()))
charge = 0
for index in fragments:
charge += self.get_fragments()[index].get_charge()
return charge
def get_spin_multiplicity(self, fragments = None):
"""
Gets the spin multiplicity of this molecule by summing the spin multiplicities of its fragments
Args:
fragments - list of fragment indices; if specified, only get the spin multiplicity of these fragments, default is to include all fragments
Returns:
Sum spin multiplicity of all or some of the fragments of this molecule
"""
if fragments is None:
fragments = range(len(self.get_fragments()))
spin_multiplicity = 1
for index in fragments:
spin_multiplicity += self.get_fragments()[index].get_spin_multiplicity() - 1
return spin_multiplicity
def get_num_fragments(self):
"""
Gets the number of fragments in this molecule
Args:
None
Returns:
Number of fragments in this molecule
"""
return len(self.get_fragments())
def get_num_atoms(self):
"""
Gets the number of atoms in this molecule
Args:
None
Returns:
Number of atoms in this molecule
"""
atoms = 0
for fragment in self.get_fragments():
atoms += fragment.get_num_atoms()
return atoms
def translate(self, x, y, z):
"""
Translates all the atoms in this molecule by the given coordinates
Args:
x - amount to translate along x axis
y - amount to translate along y axis
z - amount to translate along z axis
Returns:
None
"""
for fragment in self.get_fragments():
fragment.translate(x, y, z)
def rotate(self, quaternion, origin_x = 0, origin_y = 0, origin_z = 0):
"""
Rotates this Molecule using the rotation defined by the given Quaternion
Args:
quaternion - the Quaternion to rotate by
origin_x - x position of the point to rotate around, default is 0
origin_y - y position of the point to rotate around, default is 0
origin_z - z position of the point to rotate around, default is 0
Returns:
None
"""
for fragment in self.get_fragments():
fragment.rotate(quaternion, origin_x, origin_y, origin_z)
def move_to_center_of_mass(self):
"""
Moves the molecule to its center of mass
Args:
None
Returns:
None
"""
# keep track of the total weighted mass along each axis
total_x = 0
total_y = 0
total_z = 0
# keeps track of the total mass
total_mass = 0
# loop thru every atom in the molecule, adding its contribution to each coordinate mass
for atom in self.get_atoms():
total_x += atom.get_x() * atom.get_mass()
total_y += atom.get_y() * atom.get_mass()
total_z += atom.get_z() * atom.get_mass()
total_mass += atom.get_mass()
# calculate the center of mass by dividing the total weighted mass by the total mass
center_x = total_x / total_mass
center_y = total_y / total_mass
center_z = total_z / total_mass
# translate this molecule to the center of mass
self.translate(-center_x, -center_y, -center_z)
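# Center-of-mass recap: r_cm = (sum_i m_i * r_i) / (sum_i m_i), applied per
# axis. E.g. two atoms of mass 1 and 3 at x = 0 and x = 4 give
# center_x = (1*0 + 3*4) / 4 = 3, so translate(-3, ...) moves it to the origin.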
def rotate_on_principal_axes(self):
"""
Rotates a molecule on to its principal axis
Args:
None
Returns:
None
"""
# first we calculate the moment of inertia tensor
# [ Ixx Ixy Ixz ]
# [ Iyx Iyy Iyz ]
# [ Izx Izy Izz ]
I = [[0, 0, 0] for i in range(3)]
# loop over every atom and add their contributions to the moment of inertia tensor
for atom in self.get_atoms():
# Ixx
I[0][0] += (atom.get_y() ** 2 + atom.get_z() ** 2) * atom.get_mass()
# Ixy
I[1][0] += - (atom.get_x() * atom.get_y()) * atom.get_mass()
# Ixz
I[2][0] += - (atom.get_x() * atom.get_z()) * atom.get_mass()
# Iyx
I[0][1] += - (atom.get_y() * atom.get_x()) * atom.get_mass()
# Iyy
I[1][1] += (atom.get_x() ** 2 + atom.get_z() ** 2) * atom.get_mass()
# Iyz
I[2][1] += - (atom.get_y() * atom.get_z()) * atom.get_mass()
# Izx
I[0][2] += - (atom.get_z() * atom.get_x()) * atom.get_mass()
# Izy
I[1][2] += - (atom.get_z() * atom.get_y()) * atom.get_mass()
# Izz
I[2][2] += (atom.get_x() ** 2 + atom.get_y() ** 2) * atom.get_mass()
inertia_tensor = numpy.matrix(I)
# print("Inertia Tensor:", inertia_tensor)
# get numpy matrix from the matrix of principal moments
# get the moments and principal axis as eigen values and eigen vectors
(moments, principal_axes) = numpy.linalg.eigh(inertia_tensor)
idx = numpy.argsort(moments)[::-1]
moments = moments[idx]
principal_axes = principal_axes[:,idx]
fifthmoment = numpy.zeros(3)
# only works for molecules with no symmetry
for atom in self.get_atoms():
fifthmoment += (numpy.matrix([atom.get_x(), atom.get_y(), atom.get_z()]) * principal_axes).getA1() ** 5 * atom.get_mass()
if fifthmoment[0] < 1e-6:
principal_axes[:, 0] *= -1
if fifthmoment[1] < 1e-6:
principal_axes[:, 1] *= -1
if numpy.linalg.det(principal_axes) < 0:
principal_axes[:, 2] *= -1
# update the position of each atom
for atom in self.get_atoms():
x, y, z = (numpy.matrix([atom.get_x(), atom.get_y(), atom.get_z()]) * principal_axes).getA1()
atom.set_xyz(float(x), float(y), float(z))
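# Note: numpy.linalg.eigh is used (rather than eig) because the inertia tensor
# is symmetric, so its eigenvalues are real and its eigenvectors orthonormal.
# The fifth-moment sign flips above pin down an otherwise arbitrary choice of
# eigenvector direction, and the determinant check keeps the axes right-handed.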
def rmsd(self, other):
"""
Computes the RMSD between the positions of the atoms in two molecules
molecules must have the same fragments and atoms or an InconsistentValueError will be raised.
generally, you should make sure that both molecules have been moved to their center of mass and rotated on their principal axes.
Args:
other - the molecule to compare this one to
Returns:
The square-root of the mean squared distance between the atoms in this molecule and the other
"""
# first make sure these molecules have the same number of atoms
if self.get_num_atoms() != other.get_num_atoms():
raise InconsistentValueError("number of atoms in self", "number of atoms in other", self.get_num_atoms(), other.get_num_atoms(), "number of atoms in each molecule must be the same, make sure you are computing the rmsd of two molecules with the same atoms and fragments")
squared_distance = 0
# loop thru every pair of atoms in the two molecules
for this_atom, other_atom in zip(self.get_atoms(), other.get_atoms()):
# check to make sure that these atoms are the same type
if this_atom.get_name() != other_atom.get_name():
raise InconsistentValueError("self atom symbol", "other atom symbol", this_atom.get_name(), other_atom.get_name(), "symbols must be the same, make sure you are computing the rmsd of two molecules with the same atoms and fragments")
# add this atom pair's contribution to the squared distance
squared_distance += this_atom.distance(other_atom) ** 2
# compute rmsd as sqrt of mean squared distance
return math.sqrt(squared_distance / self.get_num_atoms())
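# Worked example: if the two molecules' atoms are pairwise displaced by
# distances 3 and 4, rmsd = sqrt((3**2 + 4**2) / 2) = sqrt(12.5) ~= 3.54.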
def rmsd2(self, other):
self_atoms = self.get_atoms()
other_atoms = other.get_atoms()
rmsds = []
for order in itertools.permutations(other_atoms):
squared_distance = 0
# loop thru every pair of atoms in the two molecules
for this_atom, other_atom in zip(self.get_atoms(), order):
# add this atom pair's contribution to the squared distance
squared_distance += this_atom.distance(other_atom) ** 2
rmsds.append(math.sqrt(squared_distance / self.get_num_atoms()))
return min(rmsds)
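# Design note: rmsd2 brute-forces every atom ordering via
# itertools.permutations, which is O(n!) in the atom count, so it is only
# practical for very small molecules; rmsd() assumes atoms already correspond.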
def distancermsd(self, other_molecule):
"""
Computes the RMSD of intramolecular interatomic distances in the two molecules
molecules must have the same fragments and atoms or an InconsistentValueError will be raised.
generally, you should make sure that both molecules have been moved to their center of mass and rotated on their principal axes.
Note:
this function is distinct from rmsd() because this function takes the rmsd of the differences between the distances between pairs of atoms within each molecule
while rmsd() takes the rmsd of the distance between the positions of the same atoms in each molecule.
Args:
other_molecule - the molecule to compare this one to
Returns:
the square-root of the mean squared difference in the distance between each pair of atoms in this molecule and the other
"""
# first make sure these molecules have the same number of atoms
if self.get_num_atoms() != other_molecule.get_num_atoms():
raise InconsistentValueError("number of atoms in self", "number of atoms in other", self.get_num_atoms(), other_molecule.get_num_atoms(), "number of atoms in each molecule must be the same, make sure you are computing the rmsd of two molecules with the same atoms and fragments")
squared_distance_difference = 0
# loop over each pair of atoms
for atom_index, this_atom1, other_atom1 in zip(range(self.get_num_atoms()), self.get_atoms(), other_molecule.get_atoms()):
for this_atom2, other_atom2 in zip(self.get_atoms()[atom_index + 1:], other_molecule.get_atoms()[atom_index + 1:]):
# check to make sure that the atom1s have the same type
if this_atom1.get_name() != other_atom1.get_name():
    raise InconsistentValueError("self atom symbol", "other atom symbol", this_atom1.get_name(), other_atom1.get_name(), "symbols must be the same, make sure you are computing the rmsd of two molecules with the same atoms and fragments")
# check to make sure that the atom2s have the same type
if this_atom2.get_name() != other_atom2.get_name():
    raise InconsistentValueError("self atom symbol", "other atom symbol", this_atom2.get_name(), other_atom2.get_name(), "symbols must be the same, make sure you are computing the rmsd of two molecules with the same atoms and fragments")
# add these atom pairs' contribution to the squared distance difference
squared_distance_difference += (this_atom1.distance(this_atom2) - other_atom1.distance(other_atom2)) ** 2
# compute the rmsd of the sqrt of mean squared distance difference
return math.sqrt(squared_distance_difference / self.get_num_atoms())
def compare(self, other, cutoff_rmsd = 0.1):
"""
Compares two molecules to see if they are similar to each other below a cutoff rmsd
Args:
other - the molecule to compare this one to
cutoff_rmsd - the rmsd level at which False will be returned, default is 0.1
Returns:
True if the rmsd between this molecule and the other is less than cutoff_rmsd, otherwise False
Always returns False if the two molecules do not have the same fragments and atoms
"""
try:
return self.rmsd(other) < cutoff_rmsd
except InconsistentValueError:
return False
def get_excluded_pairs(self, max_exclusion = 3):
"""
Gets the excluded pairs of this molecule
Args:
None
Returns:
a tuple in the format (excluded_12, excluded_13, excluded_14, ..., excluded_1x) where each excluded_1x is a list of lists of each fragment's excluded 1x pairs
"""
excluded_pairs = [[] for i in range(max_exclusion)]
for index, fragment in enumerate(self.get_fragments()):
frag_excluded_pairs = fragment.get_excluded_pairs(max_exclusion)
for exclusion_index in range(max_exclusion):
excluded_pairs[exclusion_index].append(frag_excluded_pairs[exclusion_index])
return excluded_pairs
def to_xyz(self, fragments=None, cp=False, num_digits=14):
"""
Gets a string representation of the fragments in this molecule in the xyz file format
Args:
fragments - list of fragment indices to include in the string; optional, default is to include all fragments.
cp - if True then fragments not specified in the fragments list will be included as ghost fragments.
num_digits - The number of digits after the decimal point to include when writing atom coordinates.
Default: 14 Maximum: 14
Returns:
String representation of the fragments in this molecule in the xyz format
"""
# by default, use all fragments
if fragments is None:
fragments = range(self.get_num_fragments())
string = ""
for index in range(len(self.get_fragments())):
if index in fragments:
string += self.get_fragments()[index].to_xyz(num_digits=num_digits)
elif cp:
string += self.get_fragments()[index].to_ghost_xyz(num_digits=num_digits)
return string[:-1] # removes last character of string (extra newline)
def to_standard_xyz(self, fragments=None, cp=False, num_digits=14):
"""
Gets a string representation of the fragments in this molecule in the xyz file format.
The order of the fragments and atoms is in standard order.
Args:
fragments - list of fragment indices to include in the string; optional, default is to include all fragments.
cp - if True then fragments not specified in the fragments list will be included as ghost fragments.
num_digits - The number of digits after the decimal point to include when writing atom coordinates.
Default: 14 Maximum: 14
Returns:
String representation of the fragments in this molecule in the xyz format in standard order.
"""
# by default, use all fragments
if fragments is None:
fragments = range(self.get_num_fragments())
string = ""
for index in range(len(self.get_standard_order())):
if index in fragments:
string += self.get_standard_order()[index].to_standard_xyz(num_digits=num_digits)
elif cp:
string += self.get_standard_order()[index].to_standard_ghost_xyz(num_digits=num_digits)
return string[:-1] # removes last character of string (extra newline)
'''
Returns a string containing indicies and energies of nbody fragment
combinations in the format of the log file
'''
def log_frag_energy(self):
string = ""
# for each item in energies, add its combination indicies and energy
# to the output string
for combination in self.energies.keys():
string += "E{}: {}\n".format(combination, "%.8f"%self.energies[combination])
return string
'''
Returns a string containing the many body interaction energies, in the
format of the log file
'''
def log_mb_energy(self, limit):
string = ""
for index in range(limit):
string += "V_{}B: {}\n".format(index + 1, "%.8f"%self.mb_energies[index])
return string
'''
Clears the energies, nmer_energies, and mb_energies fields to make way for
new calculations
'''
def clear(self):
self.energies = {}
self.nmer_energies = []
self.mb_energies = []
def get_SHA1(self):
"""
Generates the SHA1 hash of this molecule. Uses atoms, spin multiplicity and charge. Can be used to uniquely identify this molecule.
Sorts fragments and atoms into standard order first, so the same molecule specified differently will have the same hash
Args:
None
Returns:
SHA1 hash of this molecule
"""
hash_string = self.get_name() + "\n" + self.to_xyz(num_digits=5) + "\n" + str(self.get_charge()) + "\n" + str(self.get_spin_multiplicity())
return sha1(hash_string.encode()).hexdigest()
def get_symbols(self):
"""
Gets the atomic symbols of the atoms in this molecule as a list
Args:
None
Returns:
list of the atomic symbols of the atoms in this molecule
"""
return [atom.get_name() for atom in self.get_atoms()]
def get_coordinates(self):
"""
Gets the positions of the atoms in this molecule as a list of 3-tuples
Args:
None
Returns:
list of the positions of the atoms in this molecule
"""
return [(atom.get_x(), atom.get_y(), atom.get_z()) for atom in self.get_atoms()]
@staticmethod
def read_xyz(string, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment):
"""
Reads fragments from an xyz string and creates a new Molecule.
Args:
string - The xyz format string. Including the atom count line and comment line.
atoms_per_fragment - List containing the number of atoms in each fragment.
name_per_fragment - List containing the names of each fragment.
charge_per_fragment - List containing the charges of each fragment.
spin_multiplicity_per_fragment - List containing the spin multiplicities of each fragment.
symmetry_per_fragment - List containing the symmetries of each fragment, in format A1B2.
SMILE_per_fragment - List containing the SMILE strings of each fragment.
Returns:
The new Molecule.
"""
# Error checking to make sure all lists passed in are the same length
if not len(atoms_per_fragment) == len(symmetry_per_fragment):
raise InconsistentValueError("atoms per fragment", "symmetry per fragment", atoms_per_fragment, symmetry_per_fragment, "lists must be same length")
if not len(atoms_per_fragment) == len(charge_per_fragment):
raise InconsistentValueError("atoms per fragment", "charges per fragment", atoms_per_fragment, charge_per_fragment, "lists must be same length")
if not len(atoms_per_fragment) == len(spin_multiplicity_per_fragment):
raise InconsistentValueError("atoms per fragment", "spin multiplicities per fragment", atoms_per_fragment, spin_multiplicity_per_fragment, "lists must be same length")
if not len(atoms_per_fragment) == len(name_per_fragment):
raise InconsistentValueError("atoms per fragment", "fragment names", atoms_per_fragment, name_per_fragment, "lists must be same length")
if not len(atoms_per_fragment) == len(SMILE_per_fragment):
raise InconsistentValueError("atoms per fragment", "fragment SMILES", atoms_per_fragment, SMILE_per_fragment, "lists must be same length")
# break the input string apart along \n characters
lines = string.splitlines()
# read the total number of atoms from the first line of the xyz
try:
atom_total = int(lines[0])
except ValueError:
raise XYZFormatError("{}".format(lines[0]), "line should contain a single integer")
# make sure that the total number of atoms indicated by the xyz file matches the number of atoms indicated per fragment
if atom_total != sum(atoms_per_fragment):
raise InconsistentValueError("total atoms in xyz string", "fragments", atom_total, atoms_per_fragment, "fragments list must sum to total atoms from input xyz string")
# remove the atom total and comment lines from the lines list
lines = lines[2:]
# make sure that there are a number of lines equal to the total number of atoms
if len(lines) != atom_total:
raise InconsistentValueError("total atoms in xyz string", "atom lines in xyz string", atom_total, len(lines), "number of total atoms indicated in xyz string should match number of atom lines")
fragments = []
# loop over each item in the lists, each iteration containing the information to assemble one fragment
for num_atoms, name, charge, spin, symmetry, SMILE in zip(atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment):
#
fragments.append(Fragment.read_xyz("\n".join(lines[:num_atoms]), name, charge, spin, SMILE, symmetry))
# remove a number of lines from the lines list equal to the number used in the Fragment that was just read
lines = lines[num_atoms:]
return Molecule(fragments)
@staticmethod
def read_xyz_file(file, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment):
"""
Reads fragments from an xyz file and creates a new Molecule.
Will attempt to read lines from the given file handle, raising a StopIteration exception if called on an empty file.
Args:
file - The file to read from.
atoms_per_fragment - List containing the number of atoms in each fragment.
name_per_fragment - List containing the names of each fragment.
charge_per_fragment - List containing the charges of each fragment.
spin_multiplicity_per_fragment - List containing the spin multiplicities of each fragment.
symmetry_per_fragment - List containing the symmetries of each fragment, in format A1B2.
SMILE_per_fragment - List containing the SMILE strings of each fragment.
Returns:
The new Molecule.
"""
# build the xyz string
string = ""
# read blank lines until a non-blank line is found.
while(True):
line = file.readline()
# If line is EOF, then raise StopIteration to say that there are no more molecules in this file.
if line == "":
raise StopIteration
# If line is not a blank line, stop reading blank lines.
if line != "\n":
break
# add the atom count line to the string.
string += line
# read the comment line.
string += file.readline()
for i in range(sum(atoms_per_fragment)):
line = file.readline()
# if the line is EOF, we have reached EOF mid-parse!
if line == "":
raise XYZFormatError("ran out of lines to read from xyz file {} in the middle of a molecule".format(file.name), "make sure atoms_per_fragment, the atom count line in your xyz file, and the number of atom lines in your xyz file all agree.")
string += line
return Molecule.read_xyz(string, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment)
@staticmethod
def read_xyz_path(path, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment):
"""
Reads fragments from an xyz file indicated by a filepath and constructs a new Molecule.
Will attempt to read lines from the file at the given file path, raising an exception if it runs out of lines mid-parse.
Args:
path - The path to the file to read from.
atoms_per_fragment - List containing the number of atoms in each fragment.
name_per_fragment - List containing the names of each fragment.
charge_per_fragment - List containing the charges of each fragment.
spin_multiplicity_per_fragment - List containing the spin multiplicities of each fragment.
symmetry_per_fragment - List containing the symmetries of each fragment, in format A1B2.
SMILE_per_fragment - List containing the SMILE strings of each fragment.
Returns:
The new Molecule.
"""
with open(path, "r") as file:
try:
return Molecule.read_xyz_file(file, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment)
# if the call to read_xyz_file() raises a StopIteration, it means the file was empty
except StopIteration:
raise XYZFormatError("xyz file {} file is empty".format(file.name), "make sure the xyz file has at least 1 molecule in it")
@staticmethod
def read_xyz_direct(string, settings = None):
"""
Reads fragments from a string and constructs a new Molecule.
Will infer a single fragment with charge 0, spin 1, no symmetry, and a SMILE with atoms
connected in the order they appear in the string if settings is None.
Args:
string - The string to read from.
settings - Settings object containing information about the molecule.
Returns:
The new Molecule.
"""
# if settings is None, then infer default values for molecule attributes
if settings is None:
name_per_fragment = ["noname"]
charge_per_fragment = [0]
spin_multiplicity_per_fragment = [1]
total_atoms = int(string.splitlines()[0])
atoms_per_fragment = [total_atoms]
symmetry = ""
symmetry_class = 65
# loop over each atom assigning it a unique symmetry class
for atom_index in range(total_atoms):
symmetry += "{}1".format(chr(symmetry_class))
symmetry_class += 1
symmetry_per_fragment = [symmetry]
SMILE = ""
for line in string.splitlines()[2:]:
SMILE += "[" + line.split()[0] + "]"
SMILE_per_fragment = [SMILE]
# if settings is defined, read values from xyz file
else:
atoms_per_fragment = [int(count) for count in settings.get("molecule", "fragments").split(",")]
name_per_fragment = settings.get("molecule", "names").split(",")
charge_per_fragment = [int(charge) for charge in settings.get("molecule", "charges").split(",")]
spin_multiplicity_per_fragment = [int(spin) for spin in settings.get("molecule", "spins").split(",")]
symmetry_per_fragment = settings.get("molecule", "symmetry").split(",")
SMILE_per_fragment = settings.get("molecule", "SMILES").split(",")
return Molecule.read_xyz(string, atoms_per_fragment, name_per_fragment, charge_per_fragment, spin_multiplicity_per_fragment, symmetry_per_fragment, SMILE_per_fragment)
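# Inference example (no settings): for the hypothetical xyz string
#     "2\ncomment\nO 0 0 0\nH 0 0 1"
# this infers one fragment of 2 atoms, charge 0, spin 1, symmetry "A1B1"
# (each atom gets its own class, starting at chr(65) == 'A'), and the SMILE
# "[O][H]" built by wrapping each element symbol in brackets.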
@staticmethod
def read_xyz_file_direct(file, settings = None):
"""
Reads fragments from a file into a new Molecule.
Will infer a single fragment with charge 0, spin 1, no symmetry, and a SMILE with atoms
connected in the order they appear in the string if settings is None.
Args:
file - The file to read from.
settings - Settings object containing information about the molecule.
Returns:
The new Molecule.
"""
if settings is None:
position = file.tell()
atoms_per_fragment = [int(file.readline())]
file.seek(position)
else:
atoms_per_fragment = [int(count) for count in settings.get("molecule", "fragments").split(",")]
# build the xyz string
string = ""
# read lines from the file equal to the number needed for one molecule
for line_count in range(2 + sum(atoms_per_fragment)):
line = file.readline()
# if the line is an empty string, then we have reached end of file mid parse
if line == "":
if line_count == 0:
raise StopIteration # if the first line is empty, raise StopIteration to indicate that this file is out of molecules to parse
raise XYZFormatError("ran out of lines to read from xyz file {} in the middle of a molecule".format(file.name), "make sure the last molecule in the file has a comment line and a number of atoms equal to the amount indicated in the atom count line.")
string += line
return Molecule.read_xyz_direct(string, settings)
@staticmethod
def read_xyz_path_direct(path, settings = None):
"""
Reads fragments from an xyz_file indicated by a path into this Molecule
Will infer a single fragment with charge 0, spin 1, no symmetry, and a SMILE with atoms
connected in the order they appear in the string if settings is None.
Args:
path - The path to read from.
settings - Settings object containing information about the molecule.
Returns:
The new Molecule.
"""
with open(path, "r") as file:
try:
return Molecule.read_xyz_file_direct(file, settings)
# if the call to read_xyz_file() raises a StopIteration, it means the file was empty
except StopIteration:
raise XYZFormatError("xyz file {} file is empty".format(file.name), "make sure the xyz file has at least 1 molecule in it")
@staticmethod
def read_psi4_string(string):
"""
Reads the string outputted by a call to psi4.molecule.save_string_xyz() into a new Molecule.
Molecules created this way will not have name or symmetry saved correctly, because this information is not available
from the output of psi4.molecule.save_string_xyz(). As a result certain operations will not work on this molecule, for example
do not add this molecule to a database or attempt to generate its polynomial input format in style A1B2.
Args:
string - String output of psi4.molecule.save_string_xyz().
Returns:
The new Molecule.
"""
# divide the string along \n characters
lines = string.splitlines()
# read charge and spin from first line of input string, casting each to an int
try:
charge, spin_multiplicity = [int(value) for value in lines[0].split()]
except ValueError:
raise XYZFormatError(lines[0], "line format should be 'charge spin_multiplicity', make sure you are passing in the output of psi4.molecule.save_string_xyz()")
# calculate total atoms in this molecule
total_atoms = len(lines) - 1
# these fields do not matter
name = "unnamed"
# used to build the symmetry string for the fragment
symmetry = ""
# keeps track of which symmetry_class to use for the next atom
symmetry_class = 65
# loop over each atom assigning it a unique symmetry class
for atom_index in range(total_atoms):
symmetry += "{}1".format(chr(symmetry_class))
symmetry_class += 1
SMILE = ""
for line in string.splitlines()[1:]:
SMILE += line.split()[0]
return Molecule([Fragment.read_xyz("\n".join(lines[1:]), name, charge, spin_multiplicity, SMILE, symmetry)])
def get_standard_order(self):
return sorted(self.fragments, key = lambda x: x.get_name())
def get_config_molecule_section(self):
# TODO: update SMILE
fragments_list = self.get_standard_order()
names = "{}\n".format(",".join(fragment.get_name() for fragment in fragments_list))
fragments = "{}\n".format(",".join(str(fragment.get_num_atoms()) for fragment in fragments_list))
charges = "{}\n".format(",".join(str(fragment.get_charge()) for fragment in fragments_list))
spins = "{}\n".format(",".join(str(fragment.get_spin_multiplicity()) for fragment in fragments_list))
symmetry = "{}\n".format(",".join(fragment.get_standard_symmetry() for fragment in fragments_list))
SMILES = "{}\n".format(",".join(fragment.get_standard_SMILE() for fragment in fragments_list))
next_letter = "A"
for i in range(len(symmetry)):
if symmetry[i].isupper():
symmetry = symmetry[:i] + next_letter + symmetry[i + 1:]
next_letter = chr(ord(next_letter) + 1)
return names, fragments, charges, spins, symmetry, SMILES
def confirm_standard_order(self):
"""
Checks if this fragment is in standard order.
Args:
None.
Returns:
True if this fragment's atoms are in standard order.
False otherwise.
"""
if not self.get_standard_order() == self.get_fragments():
return False
for fragment in self.get_fragments():
if not fragment.confirm_standard_order():
return False
return True
def get_standard_copy(self):
"""
Gets a copy of this molecule, with fragments and atoms in standard order.
Args:
None.
Returns:
A copy of this molecule in standard order.
"""
order, frag_orders = self.get_standard_order_order()
return self.get_reordered_copy(order, frag_orders, [frag.get_standard_SMILE() for frag in self.get_standard_order()])
def get_reorder_copy(self, names, SMILES):
"""
Gets a copy of this molecule, with fragments in the order specified by the names list and
atoms in the order specified in the SMILE strings.
Args:
names - names of the fragments in the new order.
SMILES - list of SMILE strings corresponding to the new order of fragments.
Order the atoms of each fragment to match the order in these SMILE strings.
Returns:
A copy of this molecule in the order specified by names and SMILES.
"""
order, frag_orders = self.get_reorder_order(names, SMILES)
return self.get_reordered_copy(order, frag_orders, SMILES)
def get_copy(self):
"""
Gets a copy of this molecule.
Args:
None.
Returns:
An exact copy of this molecule.
"""
return self.get_reorder_copy([fragment.get_name() for fragment in self.get_fragments()],
[fragment.get_SMILE() for fragment in self.get_fragments()])
def get_standard_order_order(self):
"""
Gets the order the fragments and atoms in this molecule must be in to be in standard order.
Args:
None.
Returns:
(order, frag_orders)
order - A list of indices, where indices[i] = index of fragment that should be in index i to put the molecule in standard order.
frag_orders - A list of lists, where each list corresponds to one fragment.
where frag_orders[j][i] = index of atom that should be in index i to put the fragment j of the new order in standard order.
"""
order = [self.get_fragments().index(frag) for frag in self.get_standard_order()]
frag_orders = [frag.get_standard_order_order() for frag in [self.get_fragments()[index] for index in order]]
return order, frag_orders
def get_reorder_order(self, names, SMILES):
"""
Gets the order the fragments and atoms in this molecule must be in to match the SMILE string.
Args:
names - order the fragments to match the order in this list.
SMILES - order the atoms of each fragment to match the orders in these SMILE strings.
Returns:
(order, frag_orders)
order - A list of indices, where indices[i] = index of fragment that should be in index i to put the fragments in the order specified.
frag_orders - A list of lists, where each list corresponds to one fragment.
where frag_orders[j][i] = index of atom that should be in index i to put the fragment j of the new order in the order specified.
"""
order = []
for name in names:
for index, fragment in enumerate(self.get_fragments()):
if fragment.get_name() == name and index not in order:
order.append(index)
frag_orders = [frag.get_reorder_order(SMILE) for frag, SMILE in zip([self.get_fragments()[index] for index in order], SMILES)]
return order, frag_orders
def get_reordered_copy(self, order, frag_orders, SMILES):
"""
Gets a copy of this molecule, the fragments and atoms are reordered according to the input.
Args:
order - New order of the fragments.
frag_orders - New order of the atoms within each fragment.
SMILES - new SMILE strings for each of the fragments.
Returns:
A copy of this molecule, reordered to match the input.
"""
fragments = []
prev_frag_name = None
next_symmetry = 'A'
symmetry_dict = {}
for fragment, frag_order, SMILE in zip([self.get_fragments()[index] for index in order], frag_orders, SMILES):
prev_frag_name = fragment.get_name()
fragments.append(fragment.get_reordered_copy(frag_order, SMILE))
for atom in fragments[-1].get_atoms():
try:
symmetry = symmetry_dict[atom.get_symmetry_class()]
except KeyError:
symmetry = next_symmetry
symmetry_dict[atom.get_symmetry_class()] = symmetry
next_symmetry = chr(ord(next_symmetry) + 1)
atom.set_symmetry_class(symmetry)
return Molecule(fragments)
def __eq__(self, other):
if not self.get_name() == other.get_name():
return False
for self_frag, other_frag in zip(self.get_fragments(), other.get_fragments()):
if self_frag != other_frag:
return False
return True
def __ne__(self, other):
return not self == other
|
paesanilab/MB-Fit
|
mbfit/molecule/molecule.py
|
molecule.py
|
py
| 44,981 |
python
|
en
|
code
| 14 |
github-code
|
6
|
17870516114
|
from src.costco.CostcoItem import CostcoItem
from src.smartstore.CostcoRegister import SmartStoreItemRegister
from src.utills import Utills
# Execution logic: read Costco item info and register it on SmartStore
def excute():
webDriverPath = '/Users/tak/tak/python/crawling-dev/chromedriver'
driver = Utills.getChromeDriver(webDriverPath)
costcoPath = 'https://www.costco.co.kr/BeautyHouseholdPersonal-Care/BathFacial-Tissue/BathFacial-Tissue/KS-Premium-Bath-Tissue-40m-x-30roll-x-2/p/601285'
# crawl the Costco item info
costcoItem = CostcoItem(driver, costcoPath).excute()
costcoItem.quit()
# register the item on SmartStore
driver = Utills.getChromeDriver(webDriverPath)
store = SmartStoreItemRegister(driver)
store.itemId = costcoItem.itemId
store.priceMarginRate = 1.08 # 8% margin
store.itemTitle = costcoItem.itemTitle
store.deliveryPrice = 3000 # delivery fee: 3,000 KRW
store.itemPrice = costcoItem.itemPrice
store.itemImgs = costcoItem.itemImgs
store.itemDetailInfo = costcoItem.itemDetailInfo
store.excute()
excute()
|
geontark/crawling-dev
|
src/excute/CostcoRegisterExcute.py
|
CostcoRegisterExcute.py
|
py
| 1,129 |
python
|
en
|
code
| 2 |
github-code
|
6
|
73823061627
|
class Interval(object):
def __init__(self, start, end):
self.start = start
self.end = end
def can_attend_meetings(intervals):
intervals.sort(key=lambda i: i.start)
for i in range(1, len(intervals)):
if intervals[i].start < intervals[i - 1].end:
return False
return True
print(can_attend_meetings([Interval(0, 30), Interval(5, 10), Interval(15, 20)]))
# time complexity: O(n log n)
# space complexity: O(1)
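# Equivalent check over the sorted list (the O(n log n) sort still dominates):
#     intervals.sort(key=lambda i: i.start)
#     return all(a.end <= b.start for a, b in zip(intervals, intervals[1:]))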
|
jateen67/leetcode
|
intervals/easy/252_meeting_rooms.py
|
252_meeting_rooms.py
|
py
| 466 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20677953442
|
from django.test import TestCase
from django.urls import reverse
from apps.shop.models import Product
from apps.users.models import CustomUser
from .models import Order
test_order = {"name": "Django Django", "email": "[email protected]", "paid": True}
test_product = {
"name": "Test Product",
"abbr": "TEPR",
"slug": "tepr",
"description": "Test Product description",
"price": 2000,
}
normal_user = {"username": "normal", "email": "[email protected]", "password": "foo"}
# Create your tests here.
class TestOrderModelCreation(TestCase):
"""Test Product Model Creation"""
def setUp(self):
self.test_order = test_order
self.test_product = test_product
Order.objects.create(
**self.test_order, product=Product.objects.create(**self.test_product)
)
def test_order_model_created(self):
obj = Order.objects.get(name=self.test_order["name"])
self.assertEqual(obj.name, self.test_order["name"])
self.assertEqual(obj.email, self.test_order["email"])
self.assertEqual(obj.paid, self.test_order["paid"])
self.assertEqual(obj.product.name, self.test_product["name"])
class TestOrderCreateView(TestCase):
"""Test Order Create View"""
def setUp(self):
self.test_order = test_order
self.test_product = test_product
self.test_user = normal_user
CustomUser.objects.create_user(**self.test_user)
Order.objects.create(
**self.test_order, product=Product.objects.create(**self.test_product)
)
def test_order_create_view(self):
response = self.client.get(reverse("order_create"))
self.assertTemplateUsed(response, "orders/order_form.html")
self.assertEqual(response.status_code, 200)
def test_main_author(self):
main_author = CustomUser.objects.get(username=self.test_user["username"])
main_author.main_user = True
main_author.save()
response = self.client.get(reverse("order_create"))
self.assertEqual(response.context["main_author"], main_author)
class TestSuccessView(TestCase):
"""Test Success View"""
def setUp(self):
self.test_user = normal_user
CustomUser.objects.create_user(**self.test_user)
def test_order_success_view(self):
response = self.client.get(reverse("success_created"))
self.assertTemplateUsed(response, "orders/success_created.html")
self.assertEqual(response.status_code, 200)
def test_main_author(self):
main_author = CustomUser.objects.get(username=self.test_user["username"])
main_author.main_user = True
main_author.save()
response = self.client.get(reverse("success_created"))
self.assertEqual(response.context["main_author"], main_author)
|
akundev/akundotdev
|
apps/orders/tests.py
|
tests.py
|
py
| 2,822 |
python
|
en
|
code
| 0 |
github-code
|
6
|