Dataset schema (one record per row, fields in this order):
seq_id, text, repo_name, sub_path, file_name, file_ext, file_size_in_byte,
program_lang, lang, doc_type, stars, dataset, pt
16539700477
import os
import subprocess

import sphinx_autobuild.build
from sphinx_autobuild.build import show


def get_builder(watcher, sphinx_args, *, host, port, pre_build_commands):
    """Prepare the function that calls sphinx."""

    def build():
        """Generate the documentation using ``sphinx``."""
        if not watcher.filepath:
            return

        show(context=f"Detected change: {watcher.filepath}")

        show(context="python3 -m invoke doc")
        subprocess.run("python3 -m invoke doc".split(), check=False)

        for locale in "zh_CN", "de_DE":
            if os.path.isdir(f"output/{locale}"):
                show(context=f"python3 -m invoke intl -l {locale}")
                subprocess.run(
                    f"python3 -m invoke intl -l {locale}".split(), check=False
                )

        show(context="python3 -m invoke run -t post-process")
        subprocess.run("python3 -m invoke run -t post-process".split(), check=False)

        show(context=f"Serving on http://{host}:{port}")

    return build


sphinx_autobuild.build.get_builder = get_builder

if __name__ == "__main__":
    from sphinx_autobuild.cli import main

    main()
Nuitka/Nuitka-website
misc/sphinx_autobuild_wrapper.py
sphinx_autobuild_wrapper.py
py
1,176
python
en
code
10
github-code
6
7261153491
import cv2
import numpy as np

img = cv2.imread('bookpage.jpg')
grayscaled = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

#retval, threshold = cv2.threshold(grayscaled, 11, 255, cv2.THRESH_BINARY)
threshold = cv2.adaptiveThreshold(grayscaled, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  cv2.THRESH_BINARY, 115, 1)
median = cv2.medianBlur(threshold, 3)
gaus = cv2.GaussianBlur(threshold, (5, 5), 0)

cv2.imshow('original', img)
cv2.imshow('gray', grayscaled)
cv2.imshow('threshold', threshold)
cv2.imshow('gaus', gaus)
cv2.waitKey(0)
cv2.destroyAllWindows()
felipemateus/vis-oCompEstudo
threshHoldExemple2/threshHold.py
threshHold.py
py
541
python
en
code
0
github-code
6
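For comparison, a minimal sketch (assuming the same 'bookpage.jpg' input) of Otsu's method, which derives a global threshold from the histogram instead of the hand-picked value in the commented-out cv2.threshold call:

import cv2

gray = cv2.cvtColor(cv2.imread('bookpage.jpg'), cv2.COLOR_BGR2GRAY)
# Otsu's method picks the threshold from the image histogram; the
# threshold argument (0 here) is ignored when THRESH_OTSU is set.
retval, otsu = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print('Otsu threshold chosen:', retval)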
8398660267
import logging
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import backend as K
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense

from openfl.models.tensorflow import KerasFLModel


class KerasCNN(KerasFLModel):
    """A basic convolutional neural network model."""

    def __init__(self, **kwargs):
        """Initializer

        Args:
            **kwargs: Additional parameters to pass to the function
        """
        super().__init__(**kwargs)

        self.model = self.build_model(self.feature_shape, self.data.num_classes, **kwargs)
        self.set_logger()

        print(self.model.summary())
        if self.data is not None:
            print("Training set size: %d; Validation set size: %d"
                  % (self.get_training_data_size(), self.get_validation_data_size()))

    def build_model(self,
                    input_shape,
                    num_classes,
                    conv_kernel_size=(4, 4),
                    conv_strides=(2, 2),
                    conv1_channels_out=16,
                    conv2_channels_out=32,
                    final_dense_inputsize=100,
                    **kwargs):
        """Define the model architecture.

        Args:
            input_shape (numpy.ndarray): The shape of the data
            num_classes (int): The number of classes of the dataset

        Returns:
            tensorflow.python.keras.engine.sequential.Sequential: The model defined in Keras
        """
        model = Sequential()
        model.add(Conv2D(conv1_channels_out,
                         kernel_size=conv_kernel_size,
                         strides=conv_strides,
                         activation='relu',
                         input_shape=input_shape))
        model.add(Conv2D(conv2_channels_out,
                         kernel_size=conv_kernel_size,
                         strides=conv_strides,
                         activation='relu'))
        model.add(Flatten())
        model.add(Dense(final_dense_inputsize, activation='relu'))
        model.add(Dense(num_classes, activation='softmax'))

        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(),
                      metrics=['accuracy'])

        # initialize the optimizer variables
        opt_vars = model.optimizer.variables()
        for v in opt_vars:
            v.initializer.run(session=self.sess)

        return model
sarthakpati/OpenFederatedLearning
openfl/models/tensorflow/keras_cnn/keras_cnn.py
keras_cnn.py
py
2,530
python
en
code
1
github-code
6
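The optimizer-variable initialization at the end is TF1 session-style; outside the OpenFL wrapper the same architecture can be exercised standalone. A minimal sketch with a hypothetical (28, 28, 1) input shape and 10 classes, trained on random data:

import numpy as np
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense

# Hypothetical stand-ins for whatever self.feature_shape and
# self.data.num_classes provide inside OpenFL.
model = Sequential([
    Conv2D(16, kernel_size=(4, 4), strides=(2, 2), activation='relu',
           input_shape=(28, 28, 1)),
    Conv2D(32, kernel_size=(4, 4), strides=(2, 2), activation='relu'),
    Flatten(),
    Dense(100, activation='relu'),
    Dense(10, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])

x = np.random.rand(32, 28, 28, 1).astype('float32')
y = keras.utils.to_categorical(np.random.randint(0, 10, 32), 10)
model.fit(x, y, epochs=1, verbose=0)  # one throwaway epoch on random data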
12571576568
import requests
import pandas
from bs4 import BeautifulSoup
import json

url = 'https://www.imdb.com/chart/top/'
response = requests.get(url).content
soup = BeautifulSoup(response, 'html.parser')

title = soup.find_all('td', class_='titleColumn')
rating = soup.find_all('strong')
images = soup.find_all('img')

movie_name = []
movie_year = []
movie_href = []
movie_image = []
movie_rating = []

for t in title:
    imdb_title_num = t.a.get('href').split('/')[2]
    href = 'https://www.imdb.com/title/' + imdb_title_num
    movie_href.append(href)
    imdb_title = t.a.text
    movie_name.append(imdb_title)
    year = t.span.text
    movie_year.append(year)

for rate in rating:
    r = rate.text
    movie_rating.append(r)

for img in images:
    i = img.get('src')
    movie_image.append(i)

model = pandas.DataFrame({'title': movie_name,
                          'year': movie_year,
                          'rating': movie_rating,
                          'image': movie_image,
                          'href': movie_href})
model.to_json('movies_data.json', orient="records")
gpuligundla/IMDB-Top-Movies-List
imdb_scrap.py
imdb_scrap.py
py
979
python
en
code
0
github-code
6
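The records written by to_json load straight back into a DataFrame; a minimal sketch, assuming movies_data.json exists in the working directory:

import pandas

movies = pandas.read_json('movies_data.json', orient='records')
print(movies.head())                           # title / year / rating / image / href
print(movies['rating'].astype(float).mean())   # ratings were scraped as text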
28176425279
import sqlite3
import json
from datetime import datetime
from traceback import print_tb

from helpers import create_table_if_not_exists, get_db_path, get_timeframe_path, format_data, \
    acceptable, get_timeframes

timeframes = get_timeframes()
sql_transaction = []
start_row = 0
# start_row = 8400000  # that is where I stopped it last time

print(timeframes)


def find_parent(pid):
    try:
        sql = "SELECT comment FROM parent_reply WHERE comment_id = '{}' LIMIT 1".format(pid)
        c.execute(sql)
        result = c.fetchone()
        if result is not None:
            res = result[0]
            if res is None or res == 'False' or res == '0':
                return False
            return res
        else:
            return False
    except Exception as e:
        print('find_parent', e)
        print_tb(e)
        return False


def find_existing_score(pid):
    if pid is False:
        return False
    try:
        sql = "SELECT score FROM parent_reply WHERE parent_id = '{}' LIMIT 1".format(pid)
        c.execute(sql)
        result = c.fetchone()
        if result is not None:
            return result[0]
        else:
            return False
    except Exception as e:
        print('find_existing_score', e)
        print_tb(e)
        return False


def transaction_bldr(sql, bindings=None):
    global sql_transaction
    sql_transaction.append([sql, bindings])
    if len(sql_transaction) > 2000:
        c.execute('BEGIN TRANSACTION')
        for s, b in sql_transaction:
            try:
                if b is not None:
                    c.execute(s, b)
                else:
                    c.execute(s)
            # except Exception as e:
            #     print(str(datetime.now()), s, e)
            except:
                pass
        connection.commit()
        sql_transaction = []


def sql_insert_replace_comment(commentid, parentid, parent, comment, subreddit, time, score):
    try:
        sql = """UPDATE parent_reply
                 SET parent_id = ?, comment_id = ?, parent = ?, comment = ?,
                     subreddit = ?, unix = ?, score = ?
                 WHERE parent_id = ?;"""
        b = [parentid, commentid, parent, comment, subreddit, int(time), score, parentid]
        transaction_bldr(sql, b)
    except Exception as e:
        print('sql_insert_replace_comment', e)
        print_tb(e)


def sql_insert_has_parent(commentid, parentid, parent, comment, subreddit, time, score):
    try:
        sql = """INSERT INTO parent_reply (parent_id, comment_id, parent, comment, subreddit, unix, score)
                 VALUES (?, ?, ?, ?, ?, ?, ?);"""
        b = [parentid, commentid, parent, comment, subreddit, int(time), score]
        transaction_bldr(sql, b)
    except Exception as e:
        print('sql_insert_has_parent', e)
        print_tb(e)


def sql_insert_no_parent(commentid, parentid, comment, subreddit, time, score):
    try:
        sql = """INSERT INTO parent_reply (parent_id, comment_id, comment, subreddit, unix, score)
                 VALUES (?, ?, ?, ?, ?, ?);"""
        b = [parentid, commentid, comment, subreddit, int(time), score]
        transaction_bldr(sql, b)
    except Exception as e:
        print('sql_insert_no_parent', e)
        print_tb(e)


for timeframe in timeframes:
    with sqlite3.connect(get_db_path(timeframe)) as connection:
        c = connection.cursor()
        create_table_if_not_exists(c)
        row_counter = 0
        paired_rows = 0

        # with open(get_timeframe_path(timeframe), buffering=1000) as f:
        with open(get_timeframe_path(timeframe)) as f:
            for row in f:
                row_counter += 1

                if row_counter >= start_row:
                    try:
                        row = json.loads(row)
                        parent_id = row['parent_id'].split('_')[1]
                        body = format_data(row['body'])
                        created_utc = row['created_utc']
                        score = row['score']
                        comment_id = row['id']
                        subreddit = row['subreddit']
                        parent_data = find_parent(parent_id)
                        existing_comment_score = find_existing_score(parent_id)

                        if existing_comment_score:
                            if score > existing_comment_score:
                                if acceptable(body):
                                    sql_insert_replace_comment(comment_id, parent_id, parent_data,
                                                               body, subreddit, created_utc, score)
                        else:
                            if acceptable(body):
                                if parent_data:
                                    if score >= 2:
                                        sql_insert_has_parent(comment_id, parent_id, parent_data,
                                                              body, subreddit, created_utc, score)
                                        paired_rows += 1
                                else:
                                    sql_insert_no_parent(comment_id, parent_id, body, subreddit,
                                                         created_utc, score)
                    except Exception as e:
                        print(e)

                if row_counter % 100000 == 0:
                    print('Total Rows Read: {}, Paired Rows: {}, Time: {}'.format(
                        row_counter, paired_rows, str(datetime.now())))

    # start from 0
    start_row = 0

print('Done')
DuncteBot/chatbot
data_parser.py
data_parser.py
py
5,643
python
en
code
1
github-code
6
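find_parent and find_existing_score interpolate ids directly into SQL strings; sqlite3's '?' placeholders do the quoting safely. A minimal sketch of the same lookup with a bound parameter (the in-memory table is hypothetical):

import sqlite3

connection = sqlite3.connect(':memory:')
c = connection.cursor()
c.execute("CREATE TABLE parent_reply (comment_id TEXT, comment TEXT)")
c.execute("INSERT INTO parent_reply VALUES (?, ?)", ('abc123', 'hello'))

def find_parent(pid):
    # the '?' placeholder lets sqlite3 handle escaping of pid
    c.execute("SELECT comment FROM parent_reply WHERE comment_id = ? LIMIT 1", (pid,))
    result = c.fetchone()
    return result[0] if result else False

print(find_parent('abc123'))  # hello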
39697194939
# -*- coding: utf-8 -*-
import scrapy
from LaGou.items import LagouItem
import LaGou.settings as settings


class LagouSpider(scrapy.Spider):
    name = 'lagou'
    allowed_domains = ['https://www.lagou.com']
    start_urls = ['https://www.lagou.com/']  # was 'http://https://...', an obvious typo

    def parse(self, response):
        if response.status == 200:
            items = response.css('ul .con_list_item')
            data = LagouItem()
            for item in items:
                data['position'] = item.css('.p_top .position_link h3::text').extract_first()
                data['company'] = item.css('.company_name a::text').extract_first()
                data['sadd'] = item.css('.p_top .add em::text').extract_first()
                data['salary'] = item.css('.li_b_l .money::text').extract_first()
                data['claim'] = item.css('.p_bot .li_b_l::text').extract()[-1].strip()
                # data['tags'] = item.css('.list_item_bot .li_b_l span::text').extract()
                data['joburl'] = item.css('.p_top .position_link::attr(href)').extract_first()
                yield data

    def start_requests(self):
        for page in range(3, settings.MAX_PAGE + 1):
            url = 'https://www.lagou.com/jobs/list_%s?city=%s&cl=false&fromSearch=true' \
                  % (settings.KEY, settings.CITY)  # enter uid
            yield scrapy.Request(url=url, callback=self.parse, meta={'page': page}, dont_filter=True)
siqyka/Reptile
works/LaGou/LaGou/spiders/lagou.py
lagou.py
py
1,369
python
en
code
1
github-code
6
36818410851
import curses
import curses.ascii
from curses.textpad import Textbox


class MyTextPad(Textbox):
    ignored_keys = {
        curses.KEY_PPAGE,  # Page Up
        curses.KEY_NPAGE,  # Page Down
    }

    def __init__(self, win, default):
        super().__init__(win)
        self.default = default
        self.line = default
        self._pos = len(default)
        self.refresh()

    @property
    def pos(self):
        return self._pos

    @pos.setter
    def pos(self, val):
        self._pos = val
        if self._pos < 0:
            self._pos = 0
        if self._pos > len(self.line):
            self._pos = len(self.line)

    @property
    def cursor_pos(self):
        y = self.pos // self.maxx
        x = self.pos % self.maxx
        return y, x

    def refresh(self):
        self.win.clear()
        for y in range(self.maxy):
            self.win.addstr(y, 0, self.line[self.maxx * y:self.maxx * (y + 1)])
        self.win.move(*self.cursor_pos)
        self.win.refresh()

    def do_command(self, ch):
        ordch = ord(ch) if isinstance(ch, str) else ch
        ch = chr(ch) if isinstance(ch, int) else ch
        if curses.KEY_BACKSPACE == ordch:
            # guard added: without it, backspace at position 0 would slice
            # with self.line[:-1] and silently drop the last character
            if self.pos > 0:
                self.line = self.line[:self.pos - 1] + self.line[self.pos:]
                self.pos -= 1
        elif curses.KEY_LEFT == ordch:
            self.pos -= 1
        elif curses.KEY_RIGHT == ordch:
            self.pos += 1
        elif curses.KEY_DOWN == ordch:
            self.pos += self.maxx
        elif curses.KEY_DC == ordch:
            self.line = self.line[:self.pos] + self.line[self.pos + 1:]
        elif curses.KEY_UP == ordch:
            self.pos -= self.maxx
        elif curses.KEY_HOME == ordch:
            self.pos = 0
        elif curses.KEY_END == ordch:
            self.pos = len(self.line)
        elif ordch in self.ignored_keys:
            pass
        elif '\n' == ch:
            return 0
        elif 27 == ordch:  # Escape
            return -1
        elif ch.isprintable():
            self.line = self.line[:self.pos] + ch + self.line[self.pos:]
            self.pos += 1
        return True

    def gather(self):
        return self.line.strip()

    def edit(self, validate=None):
        while 1:
            ch = self.win.get_wch()
            if validate:
                ch = validate(ch)
            if not ch:
                continue
            code = self.do_command(ch)
            if code == -1:
                return self.default
            if not code:
                break
            self.refresh()
        return self.gather()
AzaubaevViktor/tagging
console/my_textpad.py
my_textpad.py
py
2,571
python
en
code
0
github-code
6
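A minimal sketch of driving MyTextPad under curses.wrapper (window geometry and default text are arbitrary; assumes the module above is importable as my_textpad):

import curses
from my_textpad import MyTextPad

def main(stdscr):
    curses.curs_set(1)
    win = curses.newwin(3, 40, 2, 2)  # 3 rows x 40 cols at (y=2, x=2)
    win.keypad(True)                  # deliver arrow keys as KEY_* codes
    pad = MyTextPad(win, "initial text")
    return pad.edit()                 # Enter accepts, Esc restores the default

result = curses.wrapper(main)
print("You entered:", result)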
37482811265
# coding: utf-8

import json
import os

import click
import gql
import graphql
import requests
from gql.transport.requests import RequestsHTTPTransport

try:
    # python2
    from urlparse import urlparse
except ImportError:
    # python3
    from urllib.parse import urlparse


class SchemaSourceType(click.ParamType):
    name = 'schema_source'

    def __init__(self, authenvvar=None, **kwargs):
        self.authenvvar = authenvvar
        return super().__init__(**kwargs)

    def convert_from_url(self, value, param, ctx):
        headers = {}
        if self.authenvvar is not None:
            headers['Authorization'] = os.environ.get(self.authenvvar)
        try:
            client = gql.Client(
                transport=RequestsHTTPTransport(
                    url=value,
                    headers=headers,
                    use_json=True,
                ),
                fetch_schema_from_transport=True,
            )
        except requests.exceptions.HTTPError as e:
            m = str(e)
            if self.authenvvar is not None and e.response.status_code == 401:
                m += ' : Try setting %s in the environment.' % self.authenvvar
            self.fail(m, param=param, ctx=ctx)
        except (
                requests.exceptions.ConnectionError,
                requests.exceptions.Timeout,
                requests.exceptions.RequestException
        ) as e:
            self.fail(e, param=param, ctx=ctx)
        return client.schema

    def convert_from_file(self, value, param, ctx):
        f = click.File('r').convert(value, param, ctx)
        try:
            introspection = json.load(f)['data']
            schema = graphql.build_client_schema(introspection)
        except (ValueError, KeyError) as e:
            self.fail(
                'File content is not a valid graphql schema %s.' % e,
                param=param, ctx=ctx
            )
        return schema

    def convert(self, value, param, ctx):
        parsedurl = urlparse(value)
        if parsedurl.scheme and parsedurl.netloc:
            schema = self.convert_from_url(value, param, ctx)
        else:
            schema = self.convert_from_file(value, param, ctx)
        return schema


SCHEMA_SOURCE = SchemaSourceType()
wapiflapi/gqldiff
gqldiff/clickgql.py
clickgql.py
py
2,223
python
en
code
1
github-code
6
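A minimal sketch of wiring SCHEMA_SOURCE into a click command (the command itself is hypothetical; graphql.print_schema is used to show the result):

import click
import graphql

from gqldiff.clickgql import SCHEMA_SOURCE

@click.command()
@click.argument('schema', type=SCHEMA_SOURCE)
def show_schema(schema):
    """Print the SDL for SCHEMA (a URL or an introspection JSON file)."""
    click.echo(graphql.print_schema(schema))

if __name__ == '__main__':
    show_schema()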
6209107982
a = []
sum = 0
for i in range(9):
    a.append(int(input()))
    sum += a[i]

a = sorted(a)

i = 0
while i < 9:
    sum -= a[i]
    j = 0
    while j < 9:
        if i != j and sum - a[j] == 100:
            break
        j += 1
    if j < 9:
        break
    sum += a[i]
    i += 1

for k in range(9):
    if k != i and k != j:
        print(a[k])
jshyun912/BOJ
1000 ~ 5000/2309_일곱 난쟁이.py
2309_일곱 난쟁이.py
py
353
python
en
code
0
github-code
6
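The same search (find the two of nine heights whose removal leaves a sum of 100) reads more directly with itertools.combinations; a minimal sketch with hypothetical sample input:

from itertools import combinations

heights = [20, 7, 23, 19, 10, 15, 25, 8, 13]  # hypothetical sample input
total = sum(heights)
for pair in combinations(heights, 2):
    if total - sum(pair) == 100:
        # assumes distinct heights, as the original solution effectively does
        for h in sorted(h for h in heights if h not in pair):
            print(h)
        break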
12992306131
'''
Arbitrary parameters that matter a surprising amount for making photometry
reasonably good.
'''

__all__ = ['thresh', 'fwhm', 'radius', 'annuli_r']

# Thresholds for source identification after bias & flat fielding.
# These must be low enough to catch good comparison stars. Too high, and
# astrometry gets confused.
thresh = {'g': 600, 'r': 1700, 'i': 2200, 'z': 1100}

# FWHM for star finder. Set by coarsely measuring off ds9 images.
fwhm = {'g': 14, 'r': 14, 'i': 14, 'z': 12}

# Radii for aperture photometry. Hand-measured.
radius = {'g': 18, 'r': 15, 'i': 15, 'z': 17}
annuli_r = {'g': [21, 24], 'r': [20, 24], 'i': [20, 24], 'z': [20, 23]}

# MANUAL CHOOSING: best for r: 8, i: (5 or 8, not sure)
N_comp_stars = {'r': 8, 'i': 5, 'g': 6, 'z': 6}
lgbouma/tr56reduc
src/define_arbitrary_parameters.py
define_arbitrary_parameters.py
py
735
python
en
code
1
github-code
6
31626864136
#!/usr/bin/env python3

DEBUG = False


def debug_print(s):
    if DEBUG:  # respect the module-level flag; the original printed unconditionally
        print(s)


def check(s):
    """Returns two booleans, the first is whether there are character twins.
    The second is whether there are character triplets."""
    t = sorted(s)
    t.append("\n")  # sentinel so the final run is counted
    has_twins = False
    has_triplets = False
    run_count = 1
    prev = None
    for x in t:
        if x == prev:
            run_count = run_count + 1
        else:
            if run_count == 2:
                has_twins = True
            elif run_count == 3:
                has_triplets = True
            run_count = 1
        prev = x
    debug_print("{}: {} {}".format(s, has_twins, has_triplets))
    return has_twins, has_triplets


twins_count = 0
triplets_count = 0
with open("input.txt") as the_file:
    for line in the_file:
        s = line.strip()
        has_twins, has_triplets = check(s)
        if has_twins:
            twins_count = twins_count + 1
        if has_triplets:
            triplets_count = triplets_count + 1

print(twins_count * triplets_count)
Combatjuan/adventofcode
2018/day02/day2a.py
day2a.py
py
1,044
python
en
code
0
github-code
6
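The run-length scan over the sorted string can be replaced by collections.Counter, which tallies characters directly; a minimal equivalent sketch:

from collections import Counter

def check(s):
    counts = Counter(s).values()
    return 2 in counts, 3 in counts

print(check("bababc"))  # (True, True): 'a' appears twice, 'b' three times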
39608443773
import datetime
import os
import re
import urllib.parse

from itertools import groupby

from django import forms as django_forms
from django.conf import settings
from django.core.paginator import Paginator, InvalidPage
from django import urls
from django.forms import fields
from django.http import HttpResponse, HttpResponseNotFound, Http404, \
    HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import filesizeformat
from django.utils import html
from django.views.decorators.vary import vary_on_headers

from core.utils.url import unpack_url_path
from core import models
from core import solr_index
from core.rdf import title_to_graph, issue_to_graph, page_to_graph
from core.utils.utils import HTMLCalendar, _get_tip, _stream_file, \
    _page_range_short, _rdf_base, get_page, label, create_crumbs
from core.decorator import cache_page, rdf_view


@cache_page(settings.DEFAULT_TTL_SECONDS)
def issues(request, year=None):
    issues = models.Issue.objects.all().order_by('date_issued')
    year_view, select_year_form = _create_year_form(issues, year, True)
    page_title = "Browse All Issues"
    page_name = "issues"
    crumbs = list(settings.BASE_CRUMBS)
    return render(request, 'issues.html', locals())


@cache_page(settings.DEFAULT_TTL_SECONDS)
def issues_title(request, lccn, year=None):
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    year_view, select_year_form = _create_year_form(issues, year, False)
    page_title = "Browse Issues: %s" % title.display_name
    page_name = "issues_title"
    crumbs = create_crumbs(title)
    return render(request, 'issues_title.html', locals())


@cache_page(settings.DEFAULT_TTL_SECONDS)
def title_holdings(request, lccn):
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "Libraries that Have It: %s" % label(title)
    page_name = "holdings"
    crumbs = create_crumbs(title)
    holdings = title.holdings.select_related('institution').order_by('institution__name')
    return render(request, 'holdings.html', locals())


@cache_page(settings.DEFAULT_TTL_SECONDS)
def title_marc(request, lccn):
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = "MARC Bibliographic Record: %s" % label(title)
    page_name = "marc"
    crumbs = create_crumbs(title)
    return render(request, 'marc.html', locals())


@cache_page(settings.DEFAULT_TTL_SECONDS)
@rdf_view
def title_rdf(request, lccn):
    title = get_object_or_404(models.Title, lccn=lccn)
    graph = title_to_graph(title)
    response = HttpResponse(graph.serialize(base=_rdf_base(request),
                                            include_base=True),
                            content_type='application/rdf+xml')
    return response


@cache_page(settings.DEFAULT_TTL_SECONDS)
def title_atom(request, lccn, page_number=1):
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all().order_by('-batch__created', '-date_issued')
    paginator = Paginator(issues, 100)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        raise Http404("No such page %s for title feed" % page_number)

    # figure out the time the title was most recently updated
    # via the create date of the batch
    issues = page.object_list
    num_issues = issues.count()
    if num_issues > 0:
        feed_updated = issues[0].batch.created
    else:
        feed_updated = title.created

    host = request.get_host()
    return render(request, 'title.xml', locals(),
                  content_type='application/atom+xml')


@cache_page(settings.DEFAULT_TTL_SECONDS)
def title_marcxml(request, lccn):
    title = get_object_or_404(models.Title, lccn=lccn)
    return HttpResponse(title.marc.xml, content_type='application/marc+xml')


@cache_page(settings.DEFAULT_TTL_SECONDS)
def issue_pages(request, lccn, date, edition, page_number=1):
    title = get_object_or_404(models.Title, lccn=lccn)
    _year, _month, _day = date.split("-")
    try:
        _date = datetime.date(int(_year), int(_month), int(_day))
    except ValueError as e:
        raise Http404
    try:
        issue = title.issues.filter(date_issued=_date,
                                    edition=edition).order_by("-created")[0]
    except IndexError as e:
        raise Http404
    issue_pages = []
    for page in issue.pages.all():
        # include both issue and page because of how metadata
        # is being pulled in the template
        issue_pages.append({'issue': issue, 'page': page})
    paginator = Paginator(issue_pages, 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))

    # set page number variables
    if page.has_previous():
        previous_page_number = int(page_number) - 1
    if page.has_next():
        next_page_number = int(page_number) + 1

    if not page.object_list:
        notes = issue.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            display_label = notes[0].label
            explanation = notes[0].text

    page_title = 'All Pages: %s, %s' % (label(title), label(issue))
    page_head_heading = "All Pages: %s, %s" % (title.display_name, label(issue))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition)
    response = render(request, 'issue_pages.html', locals())
    return response


@cache_page(settings.DEFAULT_TTL_SECONDS)
@rdf_view
def issue_pages_rdf(request, lccn, date, edition):
    title, issue, page = _get_tip(lccn, date, edition)
    graph = issue_to_graph(issue)
    response = HttpResponse(graph.serialize(base=_rdf_base(request),
                                            include_base=True),
                            content_type='application/rdf+xml')
    return response


@cache_page(settings.DEFAULT_TTL_SECONDS)
@vary_on_headers('Referer')
def page(request, lccn, date, edition, sequence, words=None):
    fragments = []
    if words:
        fragments.append("words=" + words)
    qs = request.META.get('QUERY_STRING')
    if qs:
        fragments.append(qs)
    if fragments:
        path_parts = dict(lccn=lccn, date=date, edition=edition,
                          sequence=sequence)
        url = urls.reverse('openoni_page', kwargs=path_parts)
        return HttpResponseRedirect(url + "#" + "&".join(fragments))

    title, issue, page = _get_tip(lccn, date, edition, sequence)

    if not page.jp2_filename:
        notes = page.notes.filter(type="noteAboutReproduction")
        num_notes = notes.count()
        if num_notes >= 1:
            explanation = notes[0].text
        else:
            explanation = ""

    # if no word highlights were requested, see if the user came
    # from search engine results and attempt to highlight words from their
    # query by redirecting to a url that has the highlighted words in it
    if not words:
        try:
            words = _search_engine_words(request)
            words = '+'.join(words)
            if len(words) > 0:
                path_parts = dict(lccn=lccn, date=date, edition=edition,
                                  sequence=sequence, words=words)
                url = urls.reverse('openoni_page_words', kwargs=path_parts)
                return HttpResponseRedirect(url)
        except Exception as e:
            if settings.DEBUG:
                raise e
            # else squish the exception so the page will still get
            # served up minus the highlights

    # Calculate the previous_issue_first_page. Note: it was decided
    # that we want to skip over issues with missing pages. See ticket
    # #383.
    _issue = issue
    while True:
        previous_issue_first_page = None
        _issue = _issue.previous
        if not _issue:
            break
        previous_issue_first_page = _issue.first_page
        if previous_issue_first_page:
            break

    # do the same as above but for next_issue this time.
    _issue = issue
    while True:
        next_issue_first_page = None
        _issue = _issue.next
        if not _issue:
            break
        next_issue_first_page = _issue.first_page
        if next_issue_first_page:
            break

    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    page_head_heading = "%s, %s, %s" % (title.display_name, label(issue),
                                        label(page))
    page_head_subheading = label(title)
    crumbs = create_crumbs(title, issue, date, edition, page)

    filename = page.jp2_abs_filename
    if filename:
        try:
            im = os.path.getsize(filename)
            image_size = filesizeformat(im)
        except OSError:
            image_size = "Unknown"

    image_credit = issue.batch.awardee.name
    host = request.get_host()
    static_url = settings.STATIC_URL

    template = "page.html"
    response = render(request, template, locals())
    return response


@cache_page(settings.DEFAULT_TTL_SECONDS)
def titles(request, start=None, page_number=1):
    page_title = 'Newspaper Titles'
    if start:
        page_title += ' Starting With %s' % start
        titles = models.Title.objects.order_by('name_normal')
        titles = titles.filter(name_normal__istartswith=start.upper())
    else:
        titles = models.Title.objects.all().order_by('name_normal')
    paginator = Paginator(titles, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_start = page.start_index()
    page_end = page.end_index()
    page_range_short = list(_page_range_short(paginator, page))
    browse_val = [chr(n) for n in range(65, 91)]
    browse_val.extend([str(i) for i in range(10)])
    collapse_search_tab = True
    crumbs = list(settings.BASE_CRUMBS)
    return render(request, 'titles.html', locals())


@cache_page(settings.DEFAULT_TTL_SECONDS)
def title(request, lccn):
    title = get_object_or_404(models.Title, lccn=lccn)
    page_title = label(title)
    page_name = "title"
    # we call these here because they query the db, they are not
    # cached by django's ORM, and we have some conditional logic
    # in the template that would result in them getting called more
    # than once. Short story: minimize database hits...
    related_titles = title.related_titles()
    succeeding_titles = title.succeeding_titles()
    preceeding_titles = title.preceeding_titles()
    notes = []
    has_external_link = False
    for note in title.notes.all():
        org_text = html.escape(note.text)
        text = re.sub('(http(s)?://[^\s]+[^\.])',
                      r'<a class="external" href="\1">\1</a>', org_text)
        if text != org_text:
            has_external_link = True
        notes.append(text)

    if title.has_issues:
        rep_notes = title.first_issue.notes.filter(type="noteAboutReproduction")
        num_notes = rep_notes.count()
        if num_notes >= 1:
            explanation = rep_notes[0].text
        first_issue = title.first_issue
        if first_issue:
            issue_date = first_issue.date_issued

    # add essay info on this page from either the database or from a template
    first_essay = title.first_essay
    essay_template = os.path.join(settings.ESSAY_TEMPLATES, title.lccn + ".html")
    crumbs = create_crumbs(title)
    response = render(request, 'title.html', locals())
    return response


@cache_page(settings.DEFAULT_TTL_SECONDS)
def titles_in_city(request, state, county, city,
                   page_number=1, order='name_normal'):
    state, county, city = list(map(unpack_url_path, (state, county, city)))
    page_title = "Titles in City: %s, %s" % (city, state)
    titles = models.Title.objects.all()
    if city:
        titles = titles.filter(places__city__iexact=city)
    if county:
        titles = titles.filter(places__county__iexact=county)
    if state:
        titles = titles.filter(places__state__iexact=state)
    titles = titles.order_by(order)
    titles = titles.distinct()  # was a bare titles.distinct(), which discards the result
    if titles.count() == 0:
        raise Http404
    paginator = Paginator(titles, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    return render(request, 'reports/city.html', locals())


@cache_page(settings.DEFAULT_TTL_SECONDS)
def titles_in_county(request, state, county,
                     page_number=1, order='name_normal'):
    state, county = list(map(unpack_url_path, (state, county)))
    page_title = "Titles in County: %s, %s" % (county, state)
    titles = models.Title.objects.all()
    if county:
        titles = titles.filter(places__county__iexact=county)
    if state:
        titles = titles.filter(places__state__iexact=state)
    titles = titles.order_by(order)
    titles = titles.distinct()
    if titles.count() == 0:
        raise Http404
    paginator = Paginator(titles, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    return render(request, 'reports/county.html', locals())


@cache_page(settings.DEFAULT_TTL_SECONDS)
def titles_in_state(request, state, page_number=1, order='name_normal'):
    state = unpack_url_path(state)
    page_title = "Titles in State: %s" % state
    titles = models.Title.objects.all()
    if state:
        titles = titles.filter(places__state__iexact=state)
    titles = titles.order_by(order)
    titles = titles.distinct()
    if titles.count() == 0:
        raise Http404
    paginator = Paginator(titles, 50)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))
    return render(request, 'reports/state.html', locals())


# TODO: this redirect can go away some suitable time after 08/2010
# it predates having explicit essay ids
@cache_page(settings.DEFAULT_TTL_SECONDS)
def title_essays(request, lccn):
    title = get_object_or_404(models.Title, lccn=lccn)
    # if there's only one essay might as well redirect to it
    if len(title.essays.all()) >= 1:
        url = title.essays.all()[0].url
        return HttpResponsePermanentRedirect(url)
    else:
        return HttpResponseNotFound()


def _create_year_form(issues, year, all_issues):
    if issues.count() > 0:
        if year is None:
            _year = issues[0].date_issued.year
        else:
            _year = int(year)
    else:
        _year = 1900  # no issues available
    year_view = HTMLCalendar(firstweekday=6, issues=issues,
                             all_issues=all_issues).formatyear(_year)

    dates = issues.dates('date_issued', 'year')

    class SelectYearForm(django_forms.Form):
        year = fields.ChoiceField(choices=((d.year, d.year) for d in dates),
                                  initial=_year)
        year.widget.attrs["class"] = "form-select w-auto d-inline-block"

    return year_view, SelectYearForm()


def _search_engine_words(request):
    """
    Inspects the http request and returns a list of words from the OCR
    text relevant to a particular search engine query. If the
    request didn't come via a search engine result an empty list is
    returned.
    """
    # get the referring url
    referer = request.META.get('HTTP_REFERER')
    if not referer:
        return []
    uri = urllib.parse.urlparse(referer)
    qs = urllib.parse.parse_qs(uri.query)
    # extract a potential search query from referring url
    if 'q' in qs:
        words = qs['q'][0]
    elif 'p' in qs:
        words = qs['p'][0]
    else:
        return []
    # ask solr for the pre-analysis words that could potentially
    # match on the page. For example if we feed in 'buildings' we could get
    # ['building', 'buildings', 'BUILDING', 'Buildings'] depending
    # on the actual OCR for the page id that is passed in
    words = words.split(' ')
    words = solr_index.word_matches_for_page(request.path, words)
    return words


@cache_page(settings.DEFAULT_TTL_SECONDS)
def page_ocr(request, lccn, date, edition, sequence):
    title, issue, page = _get_tip(lccn, date, edition, sequence)
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    return render(request, 'page_text.html', locals())


def page_pdf(request, lccn, date, edition, sequence):
    title, issue, page = _get_tip(lccn, date, edition, sequence)
    return _stream_file(page.pdf_abs_filename, 'application/pdf')


def page_jp2(request, lccn, date, edition, sequence):
    title, issue, page = _get_tip(lccn, date, edition, sequence)
    return _stream_file(page.jp2_abs_filename, 'image/jp2')


def page_ocr_xml(request, lccn, date, edition, sequence):
    title, issue, page = _get_tip(lccn, date, edition, sequence)
    return _stream_file(page.ocr_abs_filename, 'application/xml')


def page_ocr_txt(request, lccn, date, edition, sequence):
    title, issue, page = _get_tip(lccn, date, edition, sequence)
    try:
        text = page.ocr.text
        return HttpResponse(text, content_type='text/plain')
    except models.OCR.DoesNotExist:
        raise Http404("No OCR for %s" % page)


@cache_page(settings.DEFAULT_TTL_SECONDS)
@rdf_view
def page_rdf(request, lccn, date, edition, sequence):
    page = get_page(lccn, date, edition, sequence)
    graph = page_to_graph(page)
    response = HttpResponse(graph.serialize(base=_rdf_base(request),
                                            include_base=True),
                            content_type='application/rdf+xml')
    return response


@cache_page(settings.DEFAULT_TTL_SECONDS)
def page_print(request, lccn, date, edition, sequence,
               width, height, x1, y1, x2, y2):
    page = get_page(lccn, date, edition, sequence)
    title = get_object_or_404(models.Title, lccn=lccn)
    issue = page.issue
    page_title = "%s, %s, %s" % (label(title), label(issue), label(page))
    crumbs = create_crumbs(title, issue, date, edition, page)
    host = request.get_host()
    image_credit = page.issue.batch.awardee.name
    path_parts = dict(lccn=lccn, date=date, edition=edition,
                      sequence=sequence, width=width, height=height,
                      x1=x1, y1=y1, x2=x2, y2=y2)
    url = urls.reverse('openoni_page_print', kwargs=path_parts)
    return render(request, 'page_print.html', locals())


@cache_page(settings.DEFAULT_TTL_SECONDS)
def issues_first_pages(request, lccn, page_number=1):
    title = get_object_or_404(models.Title, lccn=lccn)
    issues = title.issues.all()
    if not issues.count() > 0:
        raise Http404("No issues for %s" % title.display_name)

    first_pages = []
    for issue in issues:
        # include both issue and page because in some cases
        # an issue exists which has no associated pages
        first_pages.append({'issue': issue, 'page': issue.first_page})
    paginator = Paginator(first_pages, 20)
    try:
        page = paginator.page(page_number)
    except InvalidPage:
        page = paginator.page(1)
    page_range_short = list(_page_range_short(paginator, page))

    # set page number variables
    if page.has_previous():
        previous_page_number = int(page_number) - 1
    if page.has_next():
        next_page_number = int(page_number) + 1

    page_title = 'Browse Issues: %s' % label(title)
    page_head_heading = "Browse Issues: %s" % title.display_name
    page_head_subheading = label(title)
    crumbs = create_crumbs(title)
    return render(request, 'issue_pages.html', locals())
open-oni/open-oni
core/views/browse.py
browse.py
py
19,948
python
en
code
43
github-code
6
23262527075
__author__ = 'ravi'

from pprint import pprint


def get_word_count(file_name):
    content = {}
    for line in open(file_name):
        for word in line.rstrip().split(' '):
            content[word] = content.get(word, 0) + 1
    return content


words = get_word_count('mesg')
pprint(words)
simula67/advanced-python-course-material
instructor-github/day2/wc.py
wc.py
py
294
python
en
code
0
github-code
6
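dict.get with a default is exactly what collections.Counter packages up; a minimal equivalent, assuming the same 'mesg' input file:

from collections import Counter
from pprint import pprint

def get_word_count(file_name):
    counts = Counter()
    with open(file_name) as f:   # context manager also closes the file
        for line in f:
            counts.update(line.rstrip().split(' '))
    return counts

pprint(get_word_count('mesg'))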
72784612027
# https://www.codewars.com/kata/558d5c71c68d1e86b000010f
from itertools import product as P, combinations  # combinations is needed by the last variant
from collections import Counter

# precompute
vampires = []
for L in (2, 3):
    G = [range(0, 10) for _ in range(L)]
    limit1 = 10**(2*L - 1)
    limit2 = 10**L**2 - 1
    for a, b in P(P(*G), P(*G)):
        p = int(''.join(map(str, a))) * int(''.join(map(str, b)))
        if limit1 < p < limit2 and Counter(str(p)) == Counter(map(str, a + b)) and a[-1] + b[-1] != 0:
            vampires.append(p)
vampires = sorted(list(set(vampires)))

def vampire_number(k):
    return vampires[k - 1]

# clever
is_vampire = lambda x, y: sorted(f"{x}{y}") == sorted(f"{x*y}") and x % 10 + y % 10 > 0
vampires = sorted({x*y for p in (1, 2)
                   for x in range(10**p, 10**(p+1))
                   for y in range(x, 10**(p+1))
                   if is_vampire(x, y)})

# ya clever
vampires = set()
for i in [1, 2]:
    for x, y in combinations(range(10**i, 10**(i+1)), 2):
        if x % 10 == 0 == y % 10:
            continue
        z = x * y
        if sorted(str(z)) == sorted(f'{x}{y}'):
            vampires.add(z)
xs = sorted(vampires)
blzzua/codewars
7-kyu/vampire_numbers_less_than_1000000.py
vampire_numbers_less_than_1000000.py
py
1,072
python
en
code
0
github-code
6
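Taking the precompute variant on its own (the later redefinitions of vampires are alternative solutions, not continuations), a quick sanity check against the known start of the sequence:

print(vampire_number(1))  # 1260 (21 * 60)
print(vampire_number(2))  # 1395 (15 * 93)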
15393461958
# -*- coding:utf-8 -*-
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None
class Solution:
    def FindFirstCommonNode(self, pHead1, pHead2):
        # write code here
        stck1 = []
        stck2 = []
        h1, h2 = pHead1, pHead2
        while h1:
            stck1.append(h1)
            h1 = h1.next
        while h2:
            stck2.append(h2)
            h2 = h2.next
        cur = None
        while stck1 and stck2:
            if stck1.pop() == stck2[-1]:
                cur = stck2.pop()
            else:
                return cur
        return cur
shakesVan/Playground
Nowcoder/52.py
52.py
py
613
python
en
code
0
github-code
6
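The two-stack approach uses O(n) extra space; the classic two-pointer variant finds the same node in O(1) space by switching each cursor to the other list's head when it runs off the end:

def find_first_common_node(pHead1, pHead2):
    # After at most len(list1) + len(list2) steps the cursors meet at the
    # first shared node, or both become None when there is no overlap.
    a, b = pHead1, pHead2
    while a is not b:
        a = a.next if a else pHead2
        b = b.next if b else pHead1
    return a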
2534734761
import unittest

from KPIAlgebras.response_objects import response_objects
from KPIAlgebras.request_objects import request_objects


class TestResponseObjects(unittest.TestCase):

    def test_response_success_is_true(self):
        value = "test"
        response_success = response_objects.ResponseSuccess(value)
        self.assertTrue(bool(response_success))

    def test_response_success_has_type_and_value(self):
        value = "test"
        response_success = response_objects.ResponseSuccess(value)
        self.assertEquals(response_success.value, value)
        self.assertEquals(response_success.type, response_objects.ResponseSuccess.SUCCESS)

    def test_response_failure_is_false(self):
        type = 'test_type'
        message = 'test_message'
        response_failure = response_objects.ResponseFailure(type, message)
        self.assertFalse(bool(response_failure))

    def test_response_failure_has_type_and_message(self):
        type = 'parameterserror'
        message = 'test_message'
        response_failure = response_objects.ResponseFailure(type, message)
        self.assertEquals(response_failure.type, type)
        self.assertEquals(response_failure.message, message)

    def test_response_failure_has_value(self):
        type = 'parameterserror'
        message = 'test_message'
        response_failure = response_objects.ResponseFailure(type, message)
        self.assertDictEqual(response_failure.value, {'type': type, 'message': message})

    def test_response_failure_build_from_invalid_request(self):
        invalid_request = request_objects.InvalidRequestObject()
        response_failure = response_objects.ResponseFailure.build_from_invalid_request(invalid_request)
        self.assertFalse(bool(response_failure))
        self.assertEquals(response_failure.type, response_objects.ResponseFailure.PARAMETERS_ERROR)

    def test_response_failure_build_from_invalid_request_with_errors(self):
        invalid_request = request_objects.InvalidRequestObject()
        invalid_request.add_error("Target Node", 'is missing')
        invalid_request.add_error("Delta", 'is missing')
        response_failure = response_objects.ResponseFailure.build_from_invalid_request(invalid_request)
        self.assertFalse(bool(response_failure))
        self.assertEquals(response_failure.type, response_objects.ResponseFailure.PARAMETERS_ERROR)
        self.assertEquals(response_failure.message, "Target Node: is missing\nDelta: is missing")
luisfsts/KPIAlgebras
tests/response_objects/test_response_objects.py
test_response_objects.py
py
2,483
python
en
code
0
github-code
6
40610907305
# Import libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import missingno as msno
from _datetime import date
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import MinMaxScaler, LabelEncoder, StandardScaler, RobustScaler

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
pd.set_option('display.width', 500)


# Import the dataset for the small-scale applications
def load():
    data = pd.read_csv('01_miuul_machine_learning_summercamp/00_datasets/titanic.csv')
    data.columns = [col.lower() for col in data.columns]
    return data


df = load()
df.head()

##############################
# Standard scaler
##############################
ss = StandardScaler()
df['age_standard_scaler'] = ss.fit_transform(df[['age']])
df.head()

##############################
# Robust scaler
##############################
rs = RobustScaler()
df['age_robust_scaler'] = rs.fit_transform(df[['age']])
df.head()

##############################
# MinMax scaler
##############################
mms = MinMaxScaler()
df['age_min_max_scaler'] = mms.fit_transform(df[['age']])
df.head()

df.describe().T

##############################
# Getting num_summary function
##############################
def num_summary(dataframe, col_name, plot=False):
    """
    for col in num_cols:
        print(f'\n***************-{col.upper()}-***************')
        num_summary(df, col, plot=False)
    """
    quantiles = [0.05, 0.25, 0.50, 0.75, 0.90, 0.95, 0.99]
    print(dataframe[col_name].describe(quantiles).T)
    if plot:
        plt.figure(figsize=(12, 6))
        plt.subplot(1, 2, 1)
        sns.boxplot(y=dataframe[col_name], data=dataframe)
        plt.subplot(1, 2, 2)
        sns.histplot(x=dataframe[col_name], data=dataframe)
        plt.show(block=True)


age_cols = [col for col in df.columns if 'age' in col]
for col in age_cols:
    print(f'\n***************-{col.upper()}-***************')
    num_summary(df, col, plot=True)

##############################
# Converting numerical variables to categorical variables
##############################
df['age_qcut'] = pd.qcut(df.age, 5)
df.head()

df['age_cut'] = pd.cut(df.age, bins=[0, 18, 25, 45, 60, 100],
                       labels=['0_18', '19_25', '26_45', '46_60', '61_100'])
df.head()
afatsumcemreg/feature_engineering
05_feature_scaling.py
05_feature_scaling.py
py
2,572
python
en
code
2
github-code
6
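The three scalers differ only in the statistics they subtract and divide by; a minimal sketch on a toy column with one outlier shows why RobustScaler is least affected by it:

import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler

ages = np.array([[22], [25], [30], [35], [90]])  # 90 is the outlier
for scaler in (StandardScaler(), RobustScaler(), MinMaxScaler()):
    # StandardScaler: (x - mean) / std; RobustScaler: (x - median) / IQR;
    # MinMaxScaler: (x - min) / (max - min)
    print(type(scaler).__name__, scaler.fit_transform(ages).ravel().round(2))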
972412310
### Episode 1
import numpy as np
import torch

# Training data
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43],
                   [91, 88, 64],
                   [87, 134, 58],
                   [108, 43, 37],
                   [69, 96, 70]], dtype='float32')

# targets (apples, oranges)
targets = np.array([[56, 70],
                    [81, 101],
                    [119, 133],
                    [22, 37],
                    [103, 119]], dtype='float32')

# Convert inputs and targets to tensors
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
print(inputs)
print(targets)

### Linear Regression model
# Weights and Biases
w = torch.randn(2, 3, requires_grad=True)
b = torch.randn(2, requires_grad=True)
print(w)
print(b)

### Model
def model(x):
    return x @ w.t() + b

# Generate predictions
preds = model(inputs)
print(preds)

# Compare with actual targets
print(targets)

### Loss Function: to check how well our model is performing
# Calculate the difference between the two matrices (preds and targets).
# Square all the elements of the difference matrix to remove negative values.
# Calculate the average of the elements in the resulting matrix.
# diff = preds - targets
# print(diff)
# diff_sqr = diff * diff
# torch.sum(diff_sqr) / diff.numel()

# Mean Squared Error (MSE) loss
def mse(t1, t2):
    diff = t1 - t2
    return torch.sum(diff * diff) / diff.numel()

# Compute Loss
loss = mse(preds, targets)
print(loss)

# Compute Gradients
loss.backward()

# Gradients for weights
print(w)
print(w.grad)

# Gradients for biases
print(b)
print(b.grad)

# Reset the grad values, NOT the actual values
w.grad.zero_()
b.grad.zero_()
print(w)
print(b)

### Adjust weights and biases using gradient descent
# 1. Generate predictions
# 2. Calculate the loss
# 3. Compute gradients w.r.t weights and biases
# 4. Adjust the weights by subtracting a small quantity proportional to the gradient
# 5. Reset gradients to zero

# Generate predictions
preds = model(inputs)
print(preds)

# Calculate the loss
loss = mse(preds, targets)
print(loss)

# Compute Gradients
loss.backward()
print(w.grad)
print(b.grad)

# Adjust the weights and reset gradients
with torch.no_grad():
    w -= w.grad * 1e-5
    b -= b.grad * 1e-5
    w.grad.zero_()
    b.grad.zero_()

print(w)
print(b)

# Let's check whether the above slight change in the weights has improved
# (i.e. decreased) the loss value or not.
# Calculate loss again
preds = model(inputs)
loss = mse(preds, targets)
print(loss)

# Train for multiple epochs (let's say 500 epochs)
for i in range(500):
    preds = model(inputs)
    loss = mse(preds, targets)
    loss.backward()  # To calculate the gradients
    with torch.no_grad():
        w -= w.grad * 1e-5  # 1e-5 is the learning rate, a hyper-parameter in machine learning
        b -= b.grad * 1e-5
        w.grad.zero_()  # Reset the gradients to zero
        b.grad.zero_()

### Let's calculate the loss again
preds = model(inputs)
loss = mse(preds, targets)
print(loss)

### Let's compare predictions and targets (actual values)
# Predictions
print(preds)
# Targets (Actual values)
print(targets)

##### Working with Jovian
## Install
# pip install jovian --upgrade -q
# import jovian
# jovian.commit()

################# Linear regression using PyTorch built-ins, without writing manual functions
import numpy as np
import torch
import torch.nn as nn

# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58],
                   [102, 43, 37], [69, 96, 70], [73, 67, 43],
                   [91, 88, 64], [87, 134, 58], [102, 43, 37],
                   [69, 96, 70], [73, 67, 43], [91, 88, 64],
                   [87, 134, 58], [102, 43, 37], [69, 96, 70]], dtype='float32')

# targets (apples, oranges)
targets = np.array([[56, 70], [81, 101], [119, 133], [22, 37], [103, 119],
                    [56, 70], [81, 101], [119, 133], [22, 37], [103, 119],
                    [56, 70], [81, 101], [119, 133], [22, 37], [103, 119]], dtype='float32')

inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)

### Dataset and dataloader
# We are not going to use the complete dataset at once but batches of it,
# to deal with memory issues and keep computations less complex
from torch.utils.data import TensorDataset

# Define Dataset using TensorDataset
train_ds = TensorDataset(inputs, targets)
train_ds[0:3]

# You can also pick specific rows of data
# train_ds[[1, 3, 5, 7]]

from torch.utils.data import DataLoader

# Define DataLoader
batch_size = 5
train_dl = DataLoader(train_ds, batch_size, shuffle=True)

for xb, yb in train_dl:
    # print('batch:')  # prints all the batches
    print(xb)
    print(yb)
    break  # use break if you want to look at only one batch

# Define model using nn.Linear
model = nn.Linear(3, 2)
print(model.weight)
print(model.bias)

# Parameters
list(model.parameters())

# Generate predictions
preds = model(inputs)
print(preds)

### Loss Functions
# Import nn.functional
import torch.nn.functional as F

# Define Loss function
loss_fn = F.mse_loss
loss = loss_fn(model(inputs), targets)
print(loss)

# Note: to read help on the Linear model of pytorch use the following lines
# ?nn.Linear
# ?F.mse_loss

### Optimizer
# Define optimizer (Stochastic Gradient Descent)
opt = torch.optim.SGD(model.parameters(), lr=1e-5)

### Train the model
# 1. Generate predictions
# 2. Calculate the loss
# 3. Compute gradients w.r.t weights and biases
# 4. Adjust the weights by subtracting a small quantity proportional to the gradient
# 5. Reset gradients to zero

# Utility function to train the model
def fit(num_epochs, model, loss_fn, opt):
    # Repeat for given number of epochs
    for epoch in range(num_epochs):
        # Train with batches of data
        for xb, yb in train_dl:
            # 1. Generate predictions
            pred = model(xb)
            # 2. Calculate loss
            loss = loss_fn(pred, yb)
            # 3. Compute gradients
            loss.backward()
            # 4. Update parameters using gradients
            opt.step()
            # 5. Reset the gradients to zero
            opt.zero_grad()
        # Print the progress
        if (epoch + 1) % 10 == 0:
            print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))
            # loss.item() gives the actual value of the loss in that batch (here after every 10th epoch)

# Train the model for 100 epochs
fit(100, model, loss_fn, opt)

# Generate predictions
preds = model(inputs)
print(preds)

# Compare with the targets
print(targets)

### Commit and update the notebook
import jovian
jovian.commit()
NancyGirdhar/PyTorch_Basics
PyTorchSeries_E1.py
PyTorchSeries_E1.py
py
6,595
python
en
code
0
github-code
6
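nn.Linear holds the same (2, 3) weight and (2,) bias that the manual version created by hand; a minimal check that its forward pass is exactly x @ W.T + b:

import torch
import torch.nn as nn

model = nn.Linear(3, 2)
x = torch.randn(4, 3)
manual = x @ model.weight.t() + model.bias  # same computation written by hand
print(torch.allclose(model(x), manual))     # True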
36310398862
import pygame

import colors
import config


class Field(pygame.sprite.Sprite):

    def __init__(self, x, y, color=None, img_path=None):
        super().__init__()
        self.x = x
        self.y = y
        self.color = color

        # sprite image
        if img_path:
            img = pygame.image.load(img_path).convert_alpha()
            img = pygame.transform.scale(img, [config.FIELD_WIDTH, config.FIELD_HEIGHT])
            self.base_img = img
            self.image = img
            self.rect = self.image.get_rect()
            self.rect.x = x * config.FIELD_WIDTH
            self.rect.y = y * config.FIELD_HEIGHT
        else:
            self.rect = pygame.Rect(x * config.FIELD_WIDTH, y * config.FIELD_HEIGHT,
                                    config.FIELD_WIDTH, config.FIELD_HEIGHT)
            self.image = None

    def draw(self, screen):
        # draw field (background)
        if self.image:
            screen.blit(self.image, (self.rect.x, self.rect.y))

        # # draw field border
        # pygame.draw.rect(screen, colors.WHITE, self.rect, 1)
tobnie/human_planning_horizon
game/world/field.py
field.py
py
1,046
python
en
code
0
github-code
6
1370868487
"""Quizzes user on terms and definitions.""" import csv import random from collections import namedtuple RTN = lambda: '\n' def open_csv_populate_dct(): """Import a csv and populate a dictionary with its contents.""" dct = {} with open('csvs/terms_and_definitions.csv') as f: F_CSV = csv.reader(f) ROW = namedtuple('Row', next(F_CSV)) for r in F_CSV: row = ROW(*r) dct[row.term] = row.definition return dct def quiz_user(): """Quiz user.""" lst = [] print(RTN()) for term, definition in sorted(TERMS_AND_DEFINITIONS.items(), key=lambda x: random.random()): print(term) user_answer = input('> ') random.choice(list(TERMS_AND_DEFINITIONS)) if user_answer == definition: print('correct') lst.append((term, 'correct')) print(RTN()) else: print('work on that one') print(f'The correct answer is: {definition}') lst.append((term, 'incorrect')) print(RTN()) return lst def count_results(): """Count correct answers.""" lst = [] for i in results: if i[1] == 'correct': lst.append('correct') else: pass return lst def calc_perc(correct_answers, total): """Calculate percentage of correct answers.""" perc = len(correct_answers) / total * 100 perc_correct = '{0:.2f}%'.format(perc) print(f'percent correct: {perc_correct}\n') TERMS_AND_DEFINITIONS = open_csv_populate_dct() TERMS_TOTAL = len(TERMS_AND_DEFINITIONS) results = quiz_user() corrects = count_results() calc_perc(corrects, TERMS_TOTAL)
craighillelson/terms_and_definitions
terms_and_definitions.py
terms_and_definitions.py
py
1,717
python
en
code
0
github-code
6
22610452246
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin

from .models import Hybrid, Specialization, ContactPerson, Subject


class MyUserAdmin(UserAdmin):
    model = Hybrid

    fieldsets = UserAdmin.fieldsets + (
        (None, {'fields': (
            'middle_name',
            'member',
            'graduation_year',
            'image',
            'gender',
            'specialization',
            'date_of_birth',
            'title',
            'card_key',
        )}),
    )


class ContactPersonAdmin(admin.ModelAdmin):
    model = ContactPerson
    list_display = ('title', 'search_name')


admin.site.register(Hybrid, MyUserAdmin)
admin.site.register(Specialization)
admin.site.register(ContactPerson, ContactPersonAdmin)
admin.site.register(Subject)
hybrida/hybridjango
apps/registration/admin.py
admin.py
py
792
python
en
code
4
github-code
6
35914492844
class Point:
    def __init__(self, a=0, b=0):
        self.x = a
        self.y = b


class Solution:
    def numIslands2(self, n: int, m: int, operators: list) -> list:
        directions = [(0, -1), (0, 1), (1, 0), (-1, 0)]
        matrix = [0] * (n * m)
        father = [i for i in range(n * m)]
        result = []
        for p in operators:
            if matrix[self.point_to_int(p.x, p.y, m)] == 1:
                result.append(result[-1])
                continue
            matrix[self.point_to_int(p.x, p.y, m)] = 1
            self.connect(father, self.point_to_int(p.x, p.y, m), self.point_to_int(p.x, p.y, m))
            temp = 1
            if len(result) > 0:
                temp += result[-1]
            for d in directions:
                next_x = p.x + d[0]
                next_y = p.y + d[1]
                if n > next_x >= 0 and 0 <= next_y < m and matrix[self.point_to_int(next_x, next_y, m)] == 1:
                    root_a = self.find(father, self.point_to_int(p.x, p.y, m))
                    root_b = self.find(father, self.point_to_int(next_x, next_y, m))
                    if root_b != root_a:
                        self.connect(father, root_a, root_b)
                        temp -= 1
            result.append(temp)
        return result

    def connect(self, father: list, a_index: int, b_index: int) -> None:
        father[b_index] = a_index

    def find(self, father: list, nodeIndex: int) -> int:
        path = []
        while father[nodeIndex] != nodeIndex:
            path.append(nodeIndex)
            nodeIndex = father[nodeIndex]
        for n in path:
            father[n] = nodeIndex
        return nodeIndex

    def point_to_int(self, x: int, y: int, m: int) -> int:
        return x * m + y
Super262/LintCodeSolutions
data_structures/union_find/problem0434.py
problem0434.py
py
1,760
python
en
code
1
github-code
6
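A minimal usage check on a 3x3 grid, showing the running island count as cells are added and finally bridged:

sol = Solution()
ops = [Point(0, 0), Point(2, 2), Point(1, 1), Point(0, 1)]
# islands after each addLand op:
# (0,0) -> 1, (2,2) -> 2, (1,1) -> 3, then (0,1) bridges (0,0) and (1,1) -> 2
print(sol.numIslands2(3, 3, ops))  # [1, 2, 3, 2]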
21867315415
import phywhisperer.interface.naeusb as NAE
import phywhisperer.interface.program_fpga as LLINT
import os
import re
import logging
import pkg_resources
import threading
import time
from phywhisperer.interface.bootloader_sam3u import Samba
from phywhisperer.sniffer import USBSniffer, USBSimplePrintSink
from phywhisperer.protocol import PWPacketDispatcher, PWPacketHandler, IncompletePacket
from zipfile import ZipFile
from phywhisperer.firmware.phywhisperer import getsome


class Usb(PWPacketDispatcher):
    """PhyWhisperer-USB Interface"""

    MAX_PATTERN_LENGTH = 64

    def __init__(self, viewsb=False):
        """ Set up PhyWhisperer-USB device.

        Args:
            viewsb: Should only be set to 'True' when this is called by ViewSB.
        """
        self.viewsb = viewsb
        self.addpattern = False
        self.short_timestamps = [0] * 2**3
        self.long_timestamps = [0] * 2**16
        self.stat_pattern_match_value = 0
        self.capture_size = 8188  # default to FIFO size
        self.usb_trigger_freq = 240E6  # internal frequency used for trigger ticks
        self.entries_captured = 0
        self.expected_verilog_matches = 80
        self.slurp_defines()
        # Set up the PW device to handle packets in ViewSB:
        if viewsb:
            super().__init__(verbose=False)
            self.sniffer = USBSniffer()
            self.register_packet_handler(self.sniffer)

    def slurp_defines(self):
        """ Parse Verilog defines file so we can access register and bit
        definitions by name and avoid 'magic numbers'. """
        self.verilog_define_matches = 0
        defines_files = [pkg_resources.resource_filename('phywhisperer', 'firmware/defines_pw.v'),
                         pkg_resources.resource_filename('phywhisperer', 'firmware/defines_usb.v')]
        for i, defines_file in enumerate(defines_files):
            defines = open(defines_file, 'r')
            define_regex_base = re.compile(r'`define')
            define_regex_reg = re.compile(r'`define\s+?REG_')
            define_regex_radix = re.compile(r'`define\s+?(\w+).+?\'([bdh])([0-9a-fA-F]+)')
            define_regex_noradix = re.compile(r'`define\s+?(\w+?)\s+?(\d+?)')
            for define in defines:
                if define_regex_base.search(define):
                    reg = define_regex_reg.search(define)
                    match = define_regex_radix.search(define)
                    if reg:
                        if i == 0:
                            block_offset = self.MAIN_REG_SELECT << 6
                        else:
                            block_offset = self.USB_REG_SELECT << 6
                    else:
                        block_offset = 0
                    if match:
                        self.verilog_define_matches += 1
                        if match.group(2) == 'b':
                            radix = 2
                        elif match.group(2) == 'h':
                            radix = 16
                        else:
                            radix = 10
                        setattr(self, match.group(1), int(match.group(3), radix) + block_offset)
                    else:
                        match = define_regex_noradix.search(define)
                        if match:
                            self.verilog_define_matches += 1
                            setattr(self, match.group(1), int(match.group(2), 10) + block_offset)
                        else:
                            logging.warning("Couldn't parse line: %s", define)
            defines.close()
        assert self.verilog_define_matches == self.expected_verilog_matches, \
            "Trouble parsing Verilog defines files: didn't find the right number of defines " \
            "(expected %d, got %d)." % (self.expected_verilog_matches, self.verilog_define_matches)

    def con(self, PID=0xC610, sn=None, program_fpga=True, bitstream_file=None):
        """Connect to PhyWhisperer-USB. Raises error if multiple detected

        Args:
            PID (int, optional): USB PID of PhyWhisperer, defaults to 0xC610 (NewAE standard).
            sn (int, option): Serial Number of PhyWhisperer, required when multiple
                PhyWhisperers are connected.
            program_fpga (bool, option): Specifies whether or not to program the FPGA with
                the default firmware when we connect. Set to False if using custom bitstream.
        """
        self.usb = NAE.NAEUSB()
        self.usb.con(idProduct=[PID], serial_number=sn)
        self._llint = LLINT.PhyWhispererUSB(self.usb)
        if program_fpga:
            if bitstream_file is None:
                with ZipFile(getsome("phywhisperer-firmware.zip")) as myzip:
                    with myzip.open('phywhisperer_top.bit') as bitstream:
                        self._llint.FPGAProgram(bitstream)
                pass
            else:
                print("Programming custom bit stream '%s'" % bitstream_file)
                with open(bitstream_file, "rb") as bitstream:
                    self._llint.FPGAProgram(bitstream)
        self.write_reg(self.REG_COUNT_WRITES, [1])

    def set_power_source(self, src):
        """Set power source for target.

        Args:
            src (str):
                * "5V" for power from this computer (via 'Control' USB port).
                * "host" for power from the host of the connection we're sniffing.
                * "off" for no power.
        """
        if src == "5V":
            self._llint.changePowerSource(self._llint.PWR_SRC_5V)
            pass
        elif src == "host":
            self._llint.changePowerSource(self._llint.PWR_SRC_HOST)
            pass
        elif src == "off" or src is None or src == False:
            self._llint.changePowerSource(self._llint.PWR_SRC_OFF)
            pass
        else:
            raise AttributeError("Unknown source %s, valid sources: '5V', 'host', 'off'")

    def reset_fpga(self):
        """ Reset FPGA registers to defaults, use liberally to clear incorrect states. """
        # self._llint.resetFPGA()
        self.write_reg(self.REG_RESET_REG, [1])
        self.write_reg(self.REG_RESET_REG, [0])
        self.write_reg(self.REG_COUNT_WRITES, [1])

    def load_bitstream(self, bitfile):
        """Load bitstream onto FPGA"""
        if not os.path.isfile(bitfile):
            raise ValueError("Cannot find specified bitfile {}".format(bitfile))
        bitstream = open(bitfile, "rb")
        self._llint.FPGAProgram(bitstream)
        pass

    def auto_program(self):
        """ Erases the firmware of the onboard SAM3U, and reprograms it with default firmware

        Attempts to autodetect the COM PORT that the SAM3U shows up as. If this fails,
        it will be necessary to flash new firmware via :code:`program_sam3u`
        """
        import time, serial.tools.list_ports
        before = serial.tools.list_ports.comports()
        before = [b.device for b in before]
        time.sleep(0.5)
        self.erase_sam3u()
        time.sleep(1.5)
        after = serial.tools.list_ports.comports()
        after = [a.device for a in after]
        candidate = list(set(before) ^ set(after))
        if len(candidate) == 0:
            raise OSError("Could not detect COMPORT. Continue using programmer.program()")
        com = candidate[0]
        print("Detected com port {}".format(com))
        self.program_sam3u(com)

    def erase_sam3u(self):
        """Erase the SAM3U Firmware, which forces it into bootloader mode."""
        self._llint.eraseFW(confirm=True)

    def program_sam3u(self, port, fw_path=None):
        """Program the SAM3U Firmware assuming device is in bootloader mode.

        Args:
            port (str): Serial port name, such as 'COM36' or '/dev/ttyACM0'.
            fw_path (str): Path to firmware binary to program the sam3u with.
                If None, use default firmware. Defaults to None.
        """
        fw_data = None
        print("Opening firmware...")
        if fw_path is None:
            print("Firmware not specified. Using firmware/phywhisperer.py")
            fw_data = getsome("phywhisperer-SAM3U1C.bin").read()
        else:
            if not os.path.isfile(fw_path):
                raise ValueError("Cannot find specified firmware file {}".format(fw_path))
            fw_data = open(fw_path, "rb").read()

        sam = Samba()
        print("Opened!\nConnecting...")
        sam.con(port)
        print("Connected!\nErasing...")
        sam.erase()
        print("Erased!\nProgramming file {}...".format(fw_path))
        sam.write(fw_data)
        print("Programmed!\nVerifying...")
        if sam.verify(fw_data):
            print("Verify OK!")
            sam.flash.setBootFlash(True)
            print("Bootloader disabled. Please power cycle device.")
        else:
            print("Verify FAILED!")
        sam.ser.close()

    def set_usb_mode(self, mode='auto'):
        """Set USB PHY speed.

        Args:
            mode (str):
                * "LS": manually set the PHY to low speed.
                * "FS": manually set the PHY to full speed.
                * "HS": manually set the PHY to high speed.
                * "auto": Default. PW will attempt to automatically determine the speed
                  when the target is connected. Mode must be set to 'auto' prior to
                  connecting or powering up the target, otherwise speed cannot be
                  determined correctly. Setting the mode to 'auto' actively causes PW
                  to try to determine the speed.
        """
        if mode == 'auto':
            self.write_reg(self.REG_USB_SPEED, [self.USB_SPEED_AUTO])
        elif mode == 'LS':
            self.write_reg(self.REG_USB_SPEED, [self.USB_SPEED_LS])
        elif mode == 'FS':
            self.write_reg(self.REG_USB_SPEED, [self.USB_SPEED_FS])
        elif mode == 'HS':
            self.write_reg(self.REG_USB_SPEED, [self.USB_SPEED_HS])
        else:
            raise ValueError('Invalid mode %s; specify auto, LS, FS, or HS.' % mode)
        pass

    def write_reg(self, address, data):
        """Write a PhyWhisperer register.

        Args:
            address: int
            data: bytes
        """
        return self.usb.cmdWriteMem(address, data)

    def read_reg(self, address, size=1):
        """Reads a PhyWhisperer register.

        Args:
            address: int
            size: int, number of bytes to read

        Returns:
        """
        return self.usb.cmdReadMem(address, size)

    def get_usb_mode(self):
        """Returns USB PHY speed.

        Return values:
            - 'auto': the speed has not been determined yet (was the mode set to 'auto'
              _before_ the target was connected or powered up?).
            - 'LS': low speed
            - 'FS': full speed
            - 'HS: high speed
        """
        value = self.read_reg(self.REG_USB_SPEED)[0]
        if value == self.USB_SPEED_AUTO:
            return 'auto'
        elif value == self.USB_SPEED_LS:
            return 'LS'
        elif value == self.USB_SPEED_FS:
            return 'FS'
        elif value == self.USB_SPEED_HS:
            return 'HS'
        else:
            raise ValueError('Internal error: REG_USB_SPEED register contains invalid value %d.' % value)

    def read_capture_data(self, entries=0, verbose=False, blocking=False, burst_size=8192, timeout=5):
        """Read from USB capture memory.

        Args:
            blocking (bool, optional):
                * True: wait for data to be available before reading (slower).
                * False: read immediately, with underflow protection, all of the
                  captured data, until PW tells us we've read everything that it
                  captured ('entries' is ignored).
            entries (int, optional): When blocking=True, number of capture entries to
                read. If not specified, read all the captured data. Cannot be greater
                than capture size, as set by set_capture_size().
            burst_size (int, optional): When blocking=False, size of burst FIFO reads,
                defaults to 8192.
            timeout (int, optional): timeout in seconds (ignored if 0, defaults to 5)
            verbose (bool, optional): Print extra debug info.

        Returns:
            List of captured entries. Each list element is itself a 3-element list,
            containing the 3 bytes that make up a capture entry. Can be parsed by
            split_packets() or split_data(). See software/phywhisperer/firmware/defines.v
            for definition of the FIFO data fields.
        """
        data = []
        starttime = time.time()
        self.entries_captured = 0
        if blocking:
            entries_read = 0
            if not entries:
                entries = self.capture_size
            elif entries > self.capture_size:
                raise ValueError('Error: requested to read %d entries but only %d were captured.'
                                 % (entries, self.capture_size))
            while entries_read < entries:
                while self.fifo_empty():
                    if timeout and time.time() - starttime > timeout:
                        logging.warning("Capture timed out!")
                        break
                data.append(self.read_reg(self.REG_SNIFF_FIFO_RD, 4)[1:4])
                entries_read += 1
        else:
            notdone = True
            early_exit = False
            raw = []
            while notdone:
                raw.extend(self.read_reg(self.REG_SNIFF_FIFO_RD, 4 * burst_size))
                # check CAPTURE_DONE and EMPTY flags on last entry read:
                bitmask = 2**self.FE_FIFO_STAT_CAPTURE_DONE + 2**self.FE_FIFO_STAT_EMPTY
                if raw[-1] & bitmask == bitmask:
                    notdone = False
                    # did we also overflow?
                    if raw[-1] & 2**self.FE_FIFO_STAT_OVERFLOW_BLOCKED:
                        logging.warning("FIFO overflowed, capture stopped.")
                        early_exit = True
                elif timeout and time.time() - starttime > timeout:
                    logging.warning("Capture timed out!")
                    notdone = False
                    early_exit = True
            # reformat the return array and at the same time, filter out the
            # (possibly numerous) empty FIFO reads:
            for i in range(int(len(raw) / 4)):
                if raw[i*4 + 3] & 3 != self.FE_FIFO_CMD_STRM:
                    data.append(raw[i*4 + 1:i*4 + 4])
            self.entries_captured = len(data)
            if early_exit:
                logging.warning("%d entries captured." % self.entries_captured)
        if len(data):  # maybe we only got empty reads
            if data[-1][2] & 2**self.FE_FIFO_STAT_UNDERFLOW:
                logging.warning("Capture FIFO underflowed!")
        return data

    def split_data(self, rawdata, verbose=False):
        """Split raw USB capture data into data events and times, stat events and times.

        Args:
            rawdata: list of lists, e.g. obtained from read_capture_data()

        Returns:
            4-tuple of lists:
                0. data event times
                1. data bytes corresponding to data event times
                2. USB status update times
                3. USB status bytes corresponding to status update times
        """
        timestep = 0
        data_bytes = []
        data_times = []
        stat_bytes = []
        stat_times = []
        last_flags = 0xff
        for raw in rawdata:
            command = raw[2] & 0x3
            if (command == self.FE_FIFO_CMD_DATA):
                data = raw[1]
                ts = raw[0] & 0x7
                self.short_timestamps[ts] += 1
                timestep += ts
                flags = (raw[0] & 0xf8) >> 3
                if verbose:
                    print("%8d flags=%02x data=%02x" % (timestep, flags, data))
                # only log flags if they've changed:
                if flags != last_flags:
                    stat_bytes.append(flags)
                    stat_times.append(timestep)
                    last_flags = flags
                data_bytes.append(data)
                data_times.append(timestep)
            elif (command == self.FE_FIFO_CMD_STAT):
                ts = raw[0] & 0x7
                self.short_timestamps[ts] += 1
                timestep += ts
                flags = (raw[0] & 0xf8) >> 3
                if verbose:
                    print("%8d flags=%02x" % (timestep, flags))
                stat_bytes.append(flags)
                stat_times.append(timestep)
                last_flags = flags
            elif (command == self.FE_FIFO_CMD_TIME):
                ts = raw[0] + (raw[1] << 8)
                self.long_timestamps[ts] += 1
                # Unlike stat and data commands, we don't add one here; if we did
                # we'd be overcounting in the common case where a time command immediately
                # precedes a stat or data command. Consequence is that timestep will be off
                # by one in the case of lone time commands (which is rare, and inconsequential
                # in practice).
                timestep += ts
                if verbose:
                    print("%8d" % timestep)
            elif (command == self.FE_FIFO_CMD_STRM):
                # nothing to do or report
                # CAUTION: don't even print a status in verbose mode because we can be
                # receiving TONS of these!
                pass
            else:
                print("ERROR: unknown command (%d)" % command)
        return (data_times, data_bytes, stat_times, stat_bytes)

    def split_packets(self, rawdata):
        """Split raw USB capture data into packets.

        Args:
            rawdata: list of lists, e.g.
obtained from read_capture_data() Returns: list Each list element is one packet and is presented in a dictionary with the following keys: * 'timestamp' * 'size' in bytes * 'contents' list of bytes """ # operates destructively so make a copy: rawdata_copy = rawdata[:] handler = PWPacketHandler() packets = [] incomplete = False while rawdata_copy and not incomplete: # use ViewSB code to avoid duplicating it here: try: packets.append(handler.handle_bytes_received(defines=self, data=rawdata_copy)) except IncompletePacket: incomplete = True continue return packets def print_packets(self, packets): """Print packets using USBSimplePrintSink from ViewSB. Args: packets: list of dictionaries, e.g. obtained from split_packets() """ printer = USBSimplePrintSink(highspeed=self.get_usb_mode() == 'HS') for packet in packets: printer.handle_usb_packet(ts=packet['timestamp'], buf=bytearray(packet['contents']), flags=(packet['flags'])) @staticmethod def print_flags(stat_byte): """Print bitfields of USB status flags byte. """ print('vbus_valid = %d' % (1 if stat_byte & 0x10 else 0)) print('sess_end = %d' % (1 if stat_byte & 0x08 else 0)) print('sess_valid = %d' % (1 if stat_byte & 0x04 else 0)) print('rx_error = %d' % (1 if stat_byte & 0x02 else 0)) print('rx_active = %d' % (1 if stat_byte & 0x01 else 0)) def set_capture_size(self, size=8188): """Set how many events to capture (events include data, USB status, and timestamps). Args: size(int, option): number of events to capture. 0 = unlimited (until overflow). Max = 2^24-1. Since the capture FIFO can hold 8188 events, setting this to > 8188 may result in overflow. """ if (size >= 2**24) or (size < 0): raise ValueError('Illegal size value.') self.capture_size = size self.write_reg(self.REG_CAPTURE_LEN, int.to_bytes(size, length=2, byteorder='little')) def ns_trigger(self, delay_in_ns): """Convert a nS number to delay or width cycles for set_trigger()""" cycles = (float(delay_in_ns) * 1.0E-9) / (1.0 / float(self.usb_trigger_freq)) return round(cycles) def us_trigger(self, delay_in_us): """Convert a uS number to delay or width cycles for set_trigger()""" cycles = (float(delay_in_us) * 1.0E-6) / (1.0 / float(self.usb_trigger_freq)) return round(cycles) def ms_trigger(self, delay_in_ms): """Convert a mS number to delay or width cycles for set_trigger()""" cycles = (float(delay_in_ms) * 1.0E-3) / (1.0 / float(self.usb_trigger_freq)) return round(cycles) def set_trigger(self, num_triggers=1, delays=[0], widths=[1], enable=True): """Program the output trigger pulse(s) delay and width. Both are measured in clock cycles of USB-derived 240 MHz clock. Note that this is a different time base than set_capture_delay(), which uses a 60 MHz clock! Up to 8 pulses may be issued. The capture delay is automatically set to match the trigger delay; use set_capture_delay to set it to a different value. Use ns_trigger(), us_trigger(), and ms_trigger() to convert values as needed. Args: num_triggers (int): number of trigger pulses, from 1 to 8. delay (list of ints): delay for each trigger pulse; each element in range [0, 2^20-1] cycles (only first element can be zero). width (list of ints): width for each trigger pulse; each element in range [1, 2^17-1] cycles. enable (bool, optional): set to 'False' to disable trigger generation on hardware pins. 
Examples: (a) To set obtain three 2-cycle-wide pulses, each 3 cycles apart, starting immediately after a pattern match: set_trigger(num_triggers=3, delays=[0,3,3], widths=[2,2,2]) (b) To set obtain a 1-cycle wide pulse 10 cycles after a pattern match, followed by a 2-cycle wide pulse 20 cycles later: set_trigger(num_triggers=2, delays=[10,20], widths=[1,2]) """ if num_triggers > 8: raise ValueError('Maximum 8 trigger pulses.') if len(delays) != num_triggers or len(widths) != num_triggers: raise ValueError('Number of elements in delays and widths must match num_triggers.') data = 0 for i in range(num_triggers): delay = delays[i] if (delay >= 2**20) or (delay < 0) or (delay < 1 and i > 0): raise ValueError('Illegal delay value.') data += delay << i*24 self.write_reg(self.REG_TRIGGER_DELAY, int.to_bytes(data, length=3*num_triggers, byteorder='little')) data = 0 for i in range(num_triggers): width = widths[i] if (width >= 2**17) or (width < 1): raise ValueError('Illegal width value.') data += width << i*24 self.write_reg(self.REG_TRIGGER_WIDTH, int.to_bytes(data, length=3*num_triggers, byteorder='little')) self.write_reg(self.REG_NUM_TRIGGERS, [num_triggers]) self.set_capture_delay(int(delay/4)) if enable == True: self.write_reg(self.REG_TRIGGER_ENABLE, [1]) else: self.write_reg(self.REG_TRIGGER_ENABLE, [0]) def set_capture_delay(self, delay): """Program the capture delay, measured in clock cycles of USB-derived 60 MHz clock. Note that this is a different time base than set_trigger(), which uses a 240 MHz clock! Args: delay (int): range in [0, 2^18-1] cycles of 60 MHz clock. """ if (delay >= 2**18) or (delay < 0): raise ValueError('Illegal delay value.') self.write_reg(self.REG_CAPTURE_DELAY, int.to_bytes(delay, length=3, byteorder='little')) def set_pattern(self, pattern, mask=None): """Set the pattern and its bitmask used for capture and trigger output. Args: pattern (list of ints): list of between 1 and 64 bytes mask (list, optional): list of bytes, must have same size as 'pattern' if set. Defaults to [0xff]*len(pattern) if not set. """ if mask is None: mask = [0xFF] * len(pattern) if len(pattern) != len(mask): raise ValueError('pattern and mask must be of same size.') elif len(pattern) > self.MAX_PATTERN_LENGTH: raise ValueError('pattern and mask cannot be more than 64 bytes.') # extend the mask to full width (cheaper to do here than in HW): mask = [0]* (self.MAX_PATTERN_LENGTH - len(mask)) + mask self.write_reg(self.REG_PATTERN, pattern[::-1]) self.write_reg(self.REG_PATTERN_MASK, mask[::-1]) self.write_reg(self.REG_PATTERN_BYTES, [len(pattern)]) self.pattern = pattern self.mask = mask def arm(self): """Arm PhyWhisperer for capture and optionally generating a trigger. Use set_pattern to program the pattern and bitmask which will initiate the capture and/or trigger operation. Use set_trigger to program the trigger parameters. Use set_capture_size and set_capture_delay to program the capture parameters. """ self.write_reg(self.REG_ARM, [1]) def check_fifo_errors(self, underflow=0, overflow=0): """Check whether an underflow or overflow occured on the capture FIFO. Args: underflow (int, optional): expected status, 0 or 1 overflow (int, optional): expected status, 0 or 1 """ status = self.read_reg(self.REG_SNIFF_FIFO_STAT, 1)[0] fifo_underflow = (status & 2) >> 1 fifo_overflow = (status & 16) >> 4 assert fifo_underflow == underflow assert fifo_overflow == overflow def fifo_empty(self): """Returns True if the capture FIFO is empty, False otherwise. 
""" if self.read_reg(self.REG_SNIFF_FIFO_STAT, 1)[0] & 1: return True else: return False def fifo_over_empty_threshold(self): """Returns True if the capture FIFO has more entries than the empty threshold (128). """ fifo_stat = self.read_reg(self.REG_SNIFF_FIFO_STAT, 1)[0] fifo_empty = fifo_stat & 1 fifo_empty_threshold = fifo_stat & 4 if fifo_empty or fifo_empty_threshold: return False else: return True def armed(self): """Returns True if the PhyWhisperer is armed. """ if self.read_reg(self.REG_ARM, 1)[0]: return True else: return False def wait_disarmed(self): """Blocks until armed() returns false. """ while self.armed(): pass def get_fpga_buildtime(self): """Returns date and time when FPGA bitfile was generated. """ raw = self.read_reg(self.REG_BUILDTIME, 4) # definitions: Xilinx XAPP1232 day = raw[3] >> 3 month = ((raw[3] & 0x7) << 1) + (raw[2] >> 7) year = ((raw[2] >> 1) & 0x3f) + 2000 hour = ((raw[2] & 0x1) << 4) + (raw[1] >> 4) minute = ((raw[1] & 0xf) << 2) + (raw[0] >> 6) return "FPGA build time: {}/{}/{}, {}:{}".format(month, day, year, hour, minute) def trigger_clock_phase_shift(self, steps=1): """Shifts the trigger clock phase (and by extension the output trigger) in steps of 18.6ps (18.6 ps = 1/960 MHz / 56) Args: steps (int): Number of steps to shift the phase (positive or negative integer). """ if not type(steps) == int or steps == 0: raise ValueError('Illegal steps value, must be non-zero integer.') if steps > 0: value = [1] else: value = [0] for i in range(abs(steps)): self.write_reg(self.REG_TRIG_CLK_PHASE_SHIFT, value) while (self.read_reg(self.REG_TRIG_CLK_PHASE_SHIFT, 1)[0] == 1): # phase shift incomplete; wait: pass def set_stat_pattern(self, pattern, mask=0x1f): """ Set a 5-bit pattern and mask for the USB status lines. Args: pattern (int): 5-bit number mask (int): non-zero 5-bit number (default: 0x1f) """ if pattern < 0 or pattern > 0x1f: raise ValueError('Illegal pattern value, must be <= 0x1f.') if mask < 1 or mask > 0x1f: raise ValueError('Illegal mask value, must be <= 0x1f and > 0.') self.write_reg(self.REG_STAT_PATTERN, [pattern, mask]) def stat_pattern_matched(self): """ Returns 1 if a stat pattern match occurred (automatically resets to 0 when armed, and when a new match pattern is written). Actual match value is stored in self.stat_pattern_match_value. """ matched, value = self.read_reg(self.REG_STAT_MATCH, 2) self.stat_pattern_match_value = value return matched def register_sink(self, event_sink): """ ViewSB: Registers a USBEventSink to receive any USB events. Args: event_sink (sniffer.USBEventSink): The sniffer.USBEventSink object to receive any USB events that occur. """ self.sniffer.register_sink(event_sink) def _device_stop_capture(self): # nothing to do? pass def run_capture(self, size=8188, burst=True, pattern=[0], mask=[0], timeout=5, statistics_callback=None, statistics_period=0.1, halt_callback=lambda _ : False, ): """ Runs a capture for ViewSB, including power cycling the device to catch the descriptors. 
Runs following internally:: self.reset_fpga() self.set_power_source("host") self.set_power_source("off") time.sleep(0.5) self.set_usb_mode("auto") self.set_capture_size(size) self.arm() self.set_trigger(enable=False) self.set_pattern(pattern=pattern, mask=mask) self.set_power_source("host") time.sleep(0.25) """ self.reset_fpga() self.set_power_source("host") self.set_power_source("off") time.sleep(0.5) self.set_usb_mode("auto") self.set_capture_size(size) self.arm() self.set_trigger(enable=False) self.set_pattern(pattern=pattern, mask=mask) self.set_power_source("host") time.sleep(0.25) self.entries_captured = 0 self._start_comms_thread(burst, timeout) elapsed_time = 0.0 try: # Continue until the user-supplied halt condition is met. while not halt_callback(elapsed_time): # If we have a statistics callback, call it. if callable(statistics_callback): statistics_callback(self, elapsed_time) # Wait for the next statistics-interval to occur. time.sleep(statistics_period) elapsed_time = elapsed_time + statistics_period finally: self._device_stop_capture() def __comms_thread_body(self, burst, timeout=5, burst_size=8192): """ ViewSB internal function that executes as our comms thread. Args: burst (bool): If True, read all FIFO at once, then pass on to decoder and frontend; otherwise, read smaller chunks and process them concurrently. burst_size (int): Number of entries to read at a time when burst=False """ if burst: self.wait_disarmed() rawdata = self.read_capture_data() self.handle_incoming_bytes(rawdata) else: notdone = True early_exit = False starttime = time.time() while notdone: raw = self.read_reg(self.REG_SNIFF_FIFO_RD, 4*burst_size) bitmask = 2**self.FE_FIFO_STAT_CAPTURE_DONE + 2**self.FE_FIFO_STAT_EMPTY if raw[-3] & bitmask == bitmask: notdone = False if raw[-3] & 2**self.FE_FIFO_STAT_OVERFLOW_BLOCKED: logging.warning("FIFO overflowed, capture stopped") early_exit = True elif timeout and time.time() - starttime > timeout: logging.warning("Capture timed out!") early_exit = True notdone = False # filter out the empty FIFO reads: rawdata = [] for i in range(int(len(raw)/4)): if raw[i*4+3] & 3 != self.FE_FIFO_CMD_STRM: rawdata.append(raw[i*4+1:i*4+4]) self.handle_incoming_bytes(rawdata) self.entries_captured += len(rawdata) if early_exit: logging.warning("%d entries captured." % self.entries_captured) def _start_comms_thread(self, burst, timeout): """ ViewSB: start the background thread that handles our core communication. """ self.commthread = threading.Thread(target=self.__comms_thread_body, args=[burst, timeout], daemon=True) self.__comm_term = False self.__comm_exc = None self.commthread.start() self.__comm_term = False def close(self): """ Terminates our connection to the PhyWhisperer device. """ if self.viewsb: self.__comm_term = True self.commthread.join() self.usb.close()
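A minimal usage sketch for the Usb class above, assuming the default NewAE hardware and that the module is importable as phywhisperer.usb; the pattern bytes and capture size are illustrative, not values from the source.

# Hypothetical capture session; only methods defined above are used.
import phywhisperer.usb as pw

phy = pw.Usb()
phy.con()                      # enumerate the device and program the stock bitstream
phy.set_power_source("host")   # power the target from the sniffed host
phy.set_usb_mode("auto")       # let the PHY determine LS/FS/HS on connect
phy.set_pattern(pattern=[0x2d, 0x00], mask=[0xff, 0xff])  # example match bytes
phy.set_capture_size(8188)     # FIFO-sized capture
phy.arm()
phy.wait_disarmed()            # blocks until the pattern match fires and capture ends
raw = phy.read_capture_data()
phy.print_packets(phy.split_packets(raw))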
newaetech/phywhispererusb
software/phywhisperer/usb.py
usb.py
py
34,049
python
en
code
77
github-code
6
7925957401
# ITP-100 Software Design
# Student: Jeannotte, Michael
# Instructor: Brown, Georgia
# Date given to class: 9-12-2022
# Date of Submission:
# Description:
# Input:
# Output:
# Additional Comments: V 1.0

# IDs and phone numbers are read as strings so leading zeros are preserved.
student_id = input('Enter your 6 digit Student Identification Number:')
first_name = input('Please Enter your First Name:')
last_name = input('Please Enter your Last Name:')
age = int(input('What is your age?'))
student_address = input('Please Enter your full street address:')
phone_num = input('Please enter a 10 digit phone number:')

# Test
ProjectInzom/public
PythonClass/Projects/Lab02/StudentRecords.py
StudentRecords.py
py
550
python
en
code
0
github-code
6
30477628890
# Count Pairs with given sum (2 Sum Problem) (Count Pairs Problem)

# Count Pairs (returns count)
def getPairsCount(arr, n, k):
    # freq maps each value seen so far to how many times it has occurred
    # (named 'freq' to avoid shadowing the built-in 'map')
    freq = {}
    cnt = 0
    for i in range(n):
        temp = k - arr[i]
        if temp in freq:
            cnt += freq[temp]
        if arr[i] in freq:
            freq[arr[i]] += 1
        else:
            freq[arr[i]] = 1
    return cnt
# T.C = O(N)
# S.C = O(N)


# Return indices
def twoSum(arr, n, k):
    index_of = {}
    for i in range(n):
        temp = k - arr[i]
        if temp in index_of:
            # return a list, not a set: order matters here, and a set
            # would not preserve it
            return [index_of[temp], i]
        index_of[arr[i]] = i
    return []

# ------------------------------------------------------------------------------------------------------------------------
# Count Pairs (return count)
# Count Pairs (T/F)
# All Pairs given sum
# Count pairs with given sum (return count)
# Key Pair (return T/F)
# Find all pairs with a given sum (return pairs)
# Count distinct pairs with difference k
# Pair with given sum in a sorted array
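A quick sanity check of the two helpers above:

arr = [1, 5, 7, -1, 5]
n, k = len(arr), 6
print(getPairsCount(arr, n, k))  # 3 -> pairs (1,5), (7,-1), (1,5)
print(twoSum(arr, n, k))         # [0, 1] -> arr[0] + arr[1] == 6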
prabhat-gp/GFG
Arrays/Arrays Easy/8a_pairs.py
8a_pairs.py
py
1,000
python
en
code
0
github-code
6
14048408200
from io import StringIO from pathlib import Path import streamlit as st import time from detect import detect import os import sys import argparse from PIL import Image import shutil import streamlit.components.v1 as components def get_subdirs(b='.'): ''' Returns all sub-directories in a specific Path ''' result = [] for d in os.listdir(b): bd = os.path.join(b, d) if os.path.isdir(bd): result.append(bd) return result def get_detection_folder(): ''' Returns the latest folder in a runs\detect ''' return max(get_subdirs(os.path.join('runs', 'detect')), key=os.path.getmtime) if __name__ == '__main__': st.title('Bird Identification System ') table_html = """ <!doctype html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <link rel="stylesheet" href="//stackpath.bootstrapcdn.com/bootstrap/4.2.1/css/bootstrap.min.css" integrity="sha384-GJzZqFGwb1QTTN6wy59ffF1BuGJpLSa9DkKMp0DgiMDm4iYMj70gZWKYbI706tWS" crossorigin="anonymous"> <style> .bd-placeholder-img { font-size: 1.125rem; text-anchor: middle; } @media (min-width: 768px) { .bd-placeholder-img-lg { font-size: 3.5rem; } } </style> <link rel="stylesheet" href="/static/style.css"> <title>Bird Identification System</title> </head> <body class="text-center"> <form class="form-signin card mb-6" method=post enctype=multipart/form-data> <img class="mb-4" src="https://ts1.cn.mm.bing.net/th/id/R-C.93dc7e23a93c7b1b1d23361ce54692a1?rik=6cirEfWmxE5hyQ&riu=http%3a%2f%2fpic.bizhi360.com%2fbbpic%2f0%2f4300.jpg&ehk=kJ5JAQiiwI2BtUKwuLsGoUzUtUagshyomug1aDlAc3A%3d&risl=&pid=ImgRaw&r=0" alt="" width="150" style="border-radius:50%"> <h1 class="h3 mb-3 font-weight-normal">Upload Any Bird Image or Video</h1> <br /> <button> <span class="box"> Weclome! 
</span> </button> <p class="mt-5 mb-3 text-muted">Built using Streamlit and Pytorch</p> </body> <!-- Github Ribbon Start--> <a href="https://github.com" class="github-corner"><svg width="160" height="160" viewBox="0 0 250 250" style="fill:#0E2E3B; color:#FFFFFF; position: absolute; top: 0; border: 0; right: 0;"> <path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"></path> <path d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2" fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"></path> <path d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z" fill="currentColor" class="octo-body"></path> </svg></a> <style> .github-corner:hover .octo-arm { animation: octocat-wave 560ms ease-in-out } @keyframes octocat-wave { 0%, 100% { transform: rotate(0) } 20%, 60% { transform: rotate(-25deg) } 40%, 80% { transform: rotate(10deg) } } @media (max-width:500px) { .github-corner:hover .octo-arm { animation: none } .github-corner .octo-arm { animation: octocat-wave 560ms ease-in-out } } </style> <!-- Github Ribbon End--> </html>""" components.html(table_html, height=400, scrolling=True) parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default='best100.pt', help='model.pt path(s)') parser.add_argument('--source', type=str, default='100birds/test/', help='source') parser.add_argument('--img-size', type=int, default=224, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.45, help='object confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='display results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--no-trace', action='store_true', help='don`t trace model') opt = parser.parse_args() print(opt) source = ("图片检测", "视频检测", "文件夹检测") source_index = st.sidebar.selectbox("选择输入", range( len(source)), format_func=lambda x: source[x]) if source_index == 0: uploaded_file = st.sidebar.file_uploader( "上传图片", type=['png', 'jpeg', 'jpg']) if uploaded_file is not None: is_valid = True with st.spinner(text='资源加载中...'): st.sidebar.image(uploaded_file) picture = Image.open(uploaded_file) picture = picture.save(f'100birds/test/{uploaded_file.name}') opt.source = f'100birds/test/{uploaded_file.name}' else: is_valid = False elif source_index == 1: uploaded_file = st.sidebar.file_uploader("上传视频", type=['mp4']) if uploaded_file is not None: is_valid = True with st.spinner(text='资源加载中...'): st.sidebar.video(uploaded_file) with open(os.path.join("100birds", "video", uploaded_file.name), "wb") as f: f.write(uploaded_file.getbuffer()) opt.source = f'100birds/video/{uploaded_file.name}' else: is_valid = False else: uploaded_files = st.sidebar.file_uploader("上传文件夹", accept_multiple_files=True) for uploaded_file in uploaded_files: if uploaded_file is not None: is_valid = True with st.spinner(text='资源加载中...'): st.sidebar.image(uploaded_file) picture = Image.open(uploaded_file) picture = picture.save(f'100birds/test/{uploaded_file.name}') opt.source = f'100birds/test/{uploaded_file.name}' else: is_valid = False is_valid = True if is_valid: print('valid') if source_index == 0: if st.button('开始检测'): detect(opt) with st.spinner(text='Preparing Images'): for img in os.listdir(get_detection_folder()): st.image(str(Path(f'{get_detection_folder()}') / img)) st.balloons() elif source_index == 1: if st.button('开始检测'): detect(opt) with st.spinner(text='Preparing Video'): for vid in os.listdir(get_detection_folder()): st.video(str(Path(f'{get_detection_folder()}') / vid)) st.balloons() else: if st.button('开始检测'): for dirpath, dirname, filenames in os.walk('100birds/test'): for filename in filenames: opt.source = os.path.join(dirpath, filename) detect(opt) with st.spinner(text='Preparing file folder'): for img in os.listdir(get_detection_folder()): st.image(str(Path(f'{get_detection_folder()}') / img)) # for vid in os.listdir(get_detection_folder()): # st.video(str(Path(f'{get_detection_folder()}') / vid)) st.balloons() shutil.rmtree('100birds/test') os.mkdir('100birds/test') # streamlit run main.py
fengxizxf/yolov-bird
main.py
main.py
py
9,736
python
en
code
3
github-code
6
1218087321
import os
from collections import namedtuple

#Define a named tuple to represent our files
FileStruct = namedtuple("File", "file_name file_ext file_path dir")

def XMLifyFile(file_struct):
    return '\t\t<file alias="' + file_struct.file_name + '">' + file_struct.file_path + "</file>\n"

# extensions to include, each listed once
valid_exts = [".graphml", ".png", ".gif", ".jpg", ".ico"]

directories = {}

for root, dirs, files in os.walk("."):
    for file_name in files:
        # Trim off the ./
        dir_name = root[2:]
        alias_name = dir_name
        dir_name = "Resources/" + dir_name # Append Resource Path

        file_prefix = os.path.splitext(file_name)[0]
        file_ext = os.path.splitext(file_name)[1].lower()
        file_path = os.path.join(dir_name, file_name)

        if file_ext in valid_exts:
            s = FileStruct(file_name = file_prefix, file_ext = file_ext, file_path = file_path, dir = dir_name)
            if alias_name not in directories:
                directories[alias_name] = []
            directories[alias_name].append(s)

resource_file = open("../resources.qrc", 'w')
resource_file.write("<RCC>\n")

for dir_name in directories:
    open_qresource = '\t<qresource prefix="/'+ dir_name + '">\n'
    close_qresource = '\t</qresource>\n'
    resource_file.write(open_qresource)
    for file_struct in directories[dir_name]:
        resource_file.write(XMLifyFile(file_struct))
    resource_file.write(close_qresource)

resource_file.write("</RCC>\n")
resource_file.flush()
resource_file.close()
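For reference, the resources.qrc the script above emits has this shape (directory and file names here are made up):

# <RCC>
# 	<qresource prefix="/Icons">
# 		<file alias="arrow">Resources/Icons/arrow.png</file>
# 		<file alias="save">Resources/Icons/save.png</file>
# 	</qresource>
# </RCC>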
cdit-ma/SEM
medea/src/app/Resources/resourceqrcmaker.py
resourceqrcmaker.py
py
1,521
python
en
code
3
github-code
6
30578979945
import os
import json
from datetime import date

from flask import Flask, g, jsonify, request, abort
from flask_cors import CORS #comment this on deployment

from db.jfl_db import Database


app = Flask(__name__)
CORS(app) #comment this on deployment


def get_db():
    '''
    Returns the database handle.
    Initializes a new Database connection if one doesn't exist
    '''
    db = getattr(g, '_db', None)
    if db is None:
        db = Database(
            host="localhost",
            user="jeff",
            password="password",
            database="jfl"
        )
        # cache the connection on the application context so later calls reuse it
        g._db = db
    return db

def validate_current_week_request(params):
    if params.get('year') is None:
        print("INFO: no 'year' attribute found in request. Adding the current year")
        params['year'] = date.today().year

def generate_current_week_response(args):
    '''
    Returns a JSON response with the current week
    '''
    response = get_db().get_current_week(args['year'])
    return jsonify({"current_week": response})

def validate_teams_playing_request(params):
    errors = []
    if params.get('week') is None:
        print("ERROR: no 'week' attribute found in request.")
        errors.append("No 'week' attribute found in request.")
    if params.get('year') is None:
        print("INFO: no 'year' attribute found in request. Adding the current year")
        params['year'] = date.today().year

    if len(errors) > 0:
        error_message = '\n' + '\n\t'.join(errors)
        abort(400, f"Invalid Request: {error_message}")

def generate_teams_playing_response(args):
    '''
    Returns a JSON response with the teams playing for the week
    '''
    response = get_db().get_teams_playing(args['week'], args['year'])
    return jsonify(response)

def validate_draft_status_request(params):
    errors = []
    if params.get('week') is None:
        print("ERROR: no 'week' attribute found in request.")
        errors.append("No 'week' attribute found in request.")
    if params.get('year') is None:
        print("INFO: no 'year' attribute found in request. Adding the current year")
        params['year'] = date.today().year

    if len(errors) > 0:
        error_message = '\n' + '\n\t'.join(errors)
        abort(400, f"Invalid Request: {error_message}")

def generate_draft_status_response(args):
    '''
    Returns a JSON response with the current draft picks for the week
    '''
    response = get_db().get_current_picks(args['week'], args['year'])
    return jsonify(response)

def validate_pick_team_request(params):
    errors = []
    if params.get('user_id') is None:
        print("ERROR: no 'user_id' attribute found in request.")
        errors.append("No 'user_id' attribute found in request.")
    if params.get('week') is None:
        print("ERROR: no 'week' attribute found in request.")
        errors.append("No 'week' attribute found in request.")
    if params.get('pick') is None:
        print("ERROR: no 'pick' attribute found in request.")
        errors.append("No 'pick' attribute found in request.")
    if params.get('team') is None:
        print("ERROR: no 'team' attribute found in request.")
        errors.append("No 'team' attribute found in request.")
    if params.get('year') is None:
        print("INFO: no 'year' attribute found in request. Adding the current year")
        params['year'] = date.today().year

    if len(errors) > 0:
        error_message = '\n' + '\n\t'.join(errors)
        abort(400, f"Invalid Request: {error_message}")

def generate_pick_team_response(args):
    '''
    Runs the pick_teams db call
    Returns a JSON successful message upon completion
    '''
    get_db().select_team(args['user_id'], args['week'], args['pick'], args['team'], args['year'])
    return jsonify({"success": True})

def validate_season_picks_request(params):
    if params.get('year') is None:
        print("INFO: no 'year' attribute found in request. 
Adding the current year") params['year'] = date.today().year def generate_season_picks_response(args): ''' Returns a JSON response with the teams selected by week & user/player ''' response = get_db().get_season_selections(args['year']) return jsonify(response) def validate_standings_request(params): if params.get('year') is None: print("INFO: no 'year' attribute found in request. Adding the current year") params['year'] = date.today().year def generate_standings_response(args): ''' Returns a JSON response with NFL team information ''' response = get_db().get_standings(args['year']) return jsonify(response) def validate_team_data_request(params): if params.get('team_id') is None: print("ERROR: no 'team_id' attribute found in request.") abort(400, f"Invalid Request: no 'team_id' attribute found in request.") def generate_team_data_response(args): ''' Returns a JSON response with NFL team information ''' response = get_db().get_team_info(args['team_id']) return jsonify(response) def validate_user_data_request(params): if params.get('user_id') is None: print("ERROR: no 'user_id' attribute found in request.") abort(400, f"Invalid Request: no 'user_id' attribute found in request.") def generate_user_data_response(args): ''' Returns a JSON response with user information ''' response = get_db().get_user_info(args['user_id']) return jsonify(response) def validate_complete_week_request(params): errors = [] if params.get('week') is None: print("ERROR: no 'week' attribute found in request.") abort(400, f"Invalid Request: no 'week' attribute found in request.") if params.get('year') is None: print("INFO: no 'year' attribute found in request. Adding the current year") params['year'] = date.today().year if len(errors) > 0: error_message = '\n' + '\n\t'.join(errors) abort(400, f"Invalid Request: {error_message}") def generate_complete_week_response(args): ''' Returns a JSON response verifying the week was completed ''' response = get_db().complete_week(args['week'], args['year']) return jsonify({"success": True}) def validate_reset_week_request(params): errors = [] if params.get('week') is None: print("ERROR: no 'week' attribute found in request.") abort(400, f"Invalid Request: no 'week' attribute found in request.") if params.get('year') is None: print("INFO: no 'year' attribute found in request. Adding the current year") params['year'] = date.today().year if len(errors) > 0: error_message = '\n' + '\n\t'.join(errors) abort(400, f"Invalid Request: {error_message}") def generate_reset_week_response(args): ''' Returns a JSON response verifying the draft picks were reset ''' response = get_db().reset_picks(args['week'], args['year']) return jsonify({"success": True}) def validate_sim_games_request(params): errors = [] if params.get('week') is None: print("ERROR: no 'week' attribute found in request.") abort(400, f"Invalid Request: no 'user_id' attribute found in request.") if params.get('year') is None: print("INFO: no 'year' attribute found in request. 
Adding the current year") params['year'] = date.today().year if len(errors) > 0: error_message = '\n' + '\n\t'.join(errors) abort(400, f"Invalid Request: {error_message}") def generate_sim_games_response(args): ''' Returns a JSON response verifying the games were simulated for the week ''' response = get_db().sim_week(args['week'], args['year']) return jsonify({"success": True}) @app.route('/api/current_week', methods=['GET']) def api_current_week(): ''' Route for the API to get the current week ''' request_args = dict(request.args) validate_current_week_request(request_args) return generate_current_week_response(request_args) @app.route('/api/teams_playing', methods=['GET']) def api_get_teams_playing(): ''' Route for the API to get the teams playing ''' request_args = dict(request.args) validate_teams_playing_request(request_args) return generate_teams_playing_response(request_args) @app.route('/api/draft_status', methods=['GET']) def api_draft_status(): ''' Route for the API to get the draft status ''' request_args = dict(request.args) validate_draft_status_request(request_args) return generate_draft_status_response(request_args) @app.route('/api/pick_team', methods=['POST']) def api_pick_team(): ''' Route for the API to make a draft selection ''' request_data = json.loads(request.data) validate_pick_team_request(request_data) return generate_pick_team_response(request_data) @app.route('/api/season_selections', methods=['GET']) def api_season_picks(): ''' Route for the API to get the league's picks for the entire season ''' request_args = dict(request.args) validate_season_picks_request(request_args) return generate_season_picks_response(request_args) @app.route('/api/standings', methods=['GET']) def api_standings(): ''' Route for the API to get the standings of the league ''' request_args = dict(request.args) validate_standings_request(request_args) return generate_standings_response(request_args) @app.route('/api/team_data', methods=['GET']) def api_team_data(): ''' Route for the API to get information about an NFL team ''' request_args = dict(request.args) validate_team_data_request(request_args) return generate_team_data_response(request_args) @app.route('/api/user_data', methods=['GET']) def api_user_data(): ''' Route for the API to get information about a user ''' request_args = dict(request.args) validate_user_data_request(request_args) return generate_user_data_response(request_args) @app.route('/api/complete_week', methods=['POST']) def api_complete_week(): ''' Route for the API to complete the week and move to the next week ''' request_data = json.loads(request.data) validate_complete_week_request(request_data) return generate_complete_week_response(request_data) @app.route('/api/reset_week', methods=['POST']) def api_reset_week(): ''' Route for the API to reset the draft picks for a week ''' request_data = json.loads(request.data) validate_reset_week_request(request_data) return generate_reset_week_response(request_data) @app.route('/api/sim_games', methods=['POST']) def api_sim_games(): ''' Route for the API to sim the games for a week ''' request_data = json.loads(request.data) validate_sim_games_request(request_data) return generate_sim_games_response(request_data) if __name__ == '__main__': # Run the app debug = os.environ.get('DEBUG', 'false').lower() == 'true' app.run(host='0.0.0.0', port=5000, debug=debug)
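A small smoke test against the routes above, assuming the service is running locally on port 5000; the week, year, and team values are placeholders, and the requests library is not part of this repo.

# Placeholder values throughout; responses depend on the database contents.
import requests

BASE = 'http://localhost:5000/api'

print(requests.get(BASE + '/current_week').json())
print(requests.get(BASE + '/teams_playing', params={'week': 1, 'year': 2023}).json())
resp = requests.post(BASE + '/pick_team', json={
    'user_id': 1, 'week': 1, 'pick': 1, 'team': 'KC', 'year': 2023})
print(resp.json())  # {'success': True} when the pick is recorded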
zrahn93/jfl
jfl_services/run.py
run.py
py
11,376
python
en
code
0
github-code
6
83273089
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score


def read_file():
    # Read the data
    df = pd.read_table("/yourfilepath/SMSSpamCollection", header=None, names=['label', 'sms_message'])
    # Map the labels: 0 means 'ham', 1 means 'spam'
    df['label'] = df.label.map({'ham': 0, 'spam': 1})
    return df


def train_and_test_data(df_data):
    # Split into training and test sets
    X_train, X_test, y_train, y_test = train_test_split(df_data['sms_message'], df_data['label'], random_state=1)
    # Create the vectorizer instance
    count_vector = CountVectorizer()
    # Turn the training data into a term-count matrix
    training_data = count_vector.fit_transform(X_train)
    # Transform the test set with the same vocabulary
    testing_data = count_vector.transform(X_test)
    naive_bayes = MultinomialNB()
    # Fit the naive Bayes classifier
    naive_bayes.fit(training_data, y_train)
    # Predict on the test data
    predictions = naive_bayes.predict(testing_data)
    return predictions, X_train, X_test, y_train, y_test


def evaluate_model(predictions, y_test):
    print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
    print('Precision score: ', format(precision_score(y_test, predictions)))
    print('Recall score: ', format(recall_score(y_test, predictions)))
    print('F1 score: ', format(f1_score(y_test, predictions)))


def print_testing_result(X_test, y_test, predictions):
    category_map = {0: 'ham', 1: 'spam'}
    for message, category, real in zip(X_test[50:100], predictions[50:100], y_test[50:100]):
        print('\n received message:', message, '\n prediction:', category_map[category], 'true value:', category_map[real])


if __name__ == "__main__":
    df = read_file()
    predictions, _, X_test, _, y_test = train_and_test_data(df)
    evaluate_model(predictions, y_test)
    print_testing_result(X_test, y_test, predictions)
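To make the vectorize-fit-predict pipeline above concrete, a tiny standalone run on toy messages (not the SMS dataset):

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

msgs = ['win a free prize now', 'are we meeting today', 'free prize waiting']
labels = [1, 0, 1]  # 1 = spam, 0 = ham

cv = CountVectorizer()
X = cv.fit_transform(msgs)            # sparse term-count matrix
clf = MultinomialNB().fit(X, labels)
print(clf.predict(cv.transform(['free prize'])))  # most likely [1]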
goelo/machine_learning
naive_bayes/smsspammessage.py
smsspammessage.py
py
2,190
python
en
code
4
github-code
6
11902565567
#!/usr/bin/env python3

import subprocess
import pyone
import config

# -----------------------
# Connect to OpenNebula
# -----------------------
one = pyone.OneServer(config.ONE['address'], session='%s:%s' % (config.ONE['username'], config.ONE['password']))

# prepare hosts to ips mapping
hostToIp = {}
hosts = one.hostpool.info()
for host in hosts.HOST:
    if host.STATE != 2:
        continue
    host_name = host.TEMPLATE['HOSTNAME']
    hostToIp[host_name] = {}
    hostToIp[host_name]['ip'] = host.TEMPLATE['IP_ADDRESS']
    hostToIp[host_name]['cgroups'] = host.TEMPLATE['CGROUPS_VERSION']

print('{count} compute nodes found'.format(count=len(hostToIp)))

# get vms
vms = one.vmpool.infoextended(-2, -1, -1, -1)
for vm in vms.VM:
    host = vm.HISTORY_RECORDS.HISTORY[-1].HOSTNAME
    # skip VMs on hosts we didn't index above (hosts filtered out by STATE)
    if host not in hostToIp:
        continue
    hostIp = hostToIp[host]['ip']
    hostCgroups = int(hostToIp[host]['cgroups'])

    if hostCgroups == 2:
        cpuCount = float(vm.TEMPLATE.get('CPU'))
        shares = int(100 * cpuCount)
        print('Updating VM %s: virsh -c qemu+tcp://%s/system schedinfo %s --set cpu_shares=%s' % (vm.NAME, hostIp, vm.DEPLOY_ID, shares))
        subprocess.check_call('virsh -c qemu+tcp://%s/system schedinfo %s --set cpu_shares=%s' % (hostIp, vm.DEPLOY_ID, shares), shell=True)
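The shares value above is just OpenNebula's fractional CPU allocation scaled by 100; a standalone check of the arithmetic:

for cpu in ('0.5', '1', '2.5', '4'):
    print(cpu, int(100 * float(cpu)))  # 0.5->50, 1->100, 2.5->250, 4->400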
OpenNebula/addon-3par
scripts/helpers/update-cpu-shares.py
update-cpu-shares.py
py
1,335
python
en
code
2
github-code
6
25881621247
#!/usr/bin/python

# Advent of Code 2022, day 4 part 1: count pairs where one elf's section
# range fully contains the other's. (Variable names are Slovenian:
# vrstica = line, prvi/drugi = first/second.)
f = open("in.txt", "r")
overlapping = 0
for vrstica in f:
    a = vrstica.split(",")
    prvi, drugi = a[0].split("-"), a[1].split("-")
    # full containment in either direction
    if (int(prvi[0]) >= int(drugi[0]) and int(prvi[1]) <= int(drugi[1])) or (
            int(prvi[0]) <= int(drugi[0]) and int(prvi[1]) >= int(drugi[1])):
        overlapping += 1
print(overlapping)
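Each input line has the form 2-8,3-7 (one section range per elf); a trace of the containment test above on that sample:

vrstica = '2-8,3-7'
a = vrstica.split(',')                           # ['2-8', '3-7']
prvi, drugi = a[0].split('-'), a[1].split('-')   # ['2', '8'], ['3', '7']
# 2 <= 3 and 8 >= 7, so the first range fully contains the second -> counted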
Anja159/Advent_of_code_2022
Day4/part1.py
part1.py
py
364
python
en
code
1
github-code
6
18524942065
model = dict( type='AGMIDRPNet', drper=dict( type='AGMIDRPer', in_channel = 3, gather_width=6, drug_encoder=dict( type='DrugGATEncoder', num_features_xd=78, heads=10, output_dim=128, gat_dropout=0.2 ), genes_encoder=dict( type='MultiEdgeGatedGraphConv', out_channels=3, num_layers=6, num_edges=3, aggr='add', bias=True, ), head=dict( type='AGMIFusionHead', out_channels=128, ), neck=dict( type='AGMICellNeck', in_channels=[6,8,16], out_channels=[8,16,32], kernel_size=[16,16,16], drop_rate=0.2, max_pool_size=[3,6,6], feat_dim=128 ), ), loss=dict(type='MSELoss', loss_weight=1.0, reduction='mean'), ) train_cfg = None test_cfg = dict(metrics=['MAE', 'MSE', 'RMSE', 'R2', 'PEARSON', 'SPEARMAN'])
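A sketch of how a config like this is typically consumed, assuming the AGMI repo follows the mmcv-style registry convention implied by the type keys and the _base_ path; the exact builder entry points are not shown in this file.

# Hypothetical loader; attribute names follow the dict above.
from mmcv import Config

cfg = Config.fromfile('configs/_base_/models/AGMI/agmi_8layers.py')
print(cfg.model.drper.genes_encoder.num_layers)  # 6
print(cfg.test_cfg.metrics)                      # ['MAE', 'MSE', ...]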
yivan-WYYGDSG/AGMI
configs/_base_/models/AGMI/agmi_8layers.py
agmi_8layers.py
py
1,075
python
en
code
1
github-code
6
42607005601
# -*- coding: utf-8 -*-
# author: inpurer(月小水长)
# pc_type lenovo
# create_date: 2018/12/9
# file_name: client.py
# description: 月小水长,热血未凉

from socket import *

if __name__ == "__main__":
    serverName = '127.0.0.1'
    serverPort = 13000
    clientSocket = socket(AF_INET, SOCK_STREAM)
    clientSocket.connect((serverName, serverPort))
    while True:
        echoMessage = input("Enter a message: ")
        clientSocket.send(echoMessage.encode("utf-8"))
        print("Reply from the server:", clientSocket.recv(1024).decode("utf-8"))
        # "BYE" asks the server to close the connection; it replies first, then closes
        if echoMessage.upper() == "BYE":
            break
    clientSocket.close()
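The select-based server this client talks to lives in a sibling file; for quick testing, a minimal blocking echo server is enough (this stand-in is not the repo's implementation):

from socket import *

serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind(('127.0.0.1', 13000))
serverSocket.listen(1)
conn, addr = serverSocket.accept()
while True:
    data = conn.recv(1024)
    if not data:
        break
    conn.send(data)  # echo the message back
    if data.decode('utf-8').upper() == 'BYE':
        break
conn.close()
serverSocket.close()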
inspurer/ComputerNetwork
echo服务/select实现并发/client.py
client.py
py
729
python
en
code
17
github-code
6
23248318495
import pygame

# necessary pygame initializing
pygame.init()

# create a surface that will be seen by the user
screen = pygame.display.set_mode((600, 400))
background = pygame.image.load('Background-1.png')

# load the image once, outside the loop, so you can tell rotation is happening
ball = pygame.image.load('arrow.png')

# create a variable for degrees of rotation
degree = 0

running = True
while running:
    # let the window respond to the close button
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

    screen.blit(background, (0, 0))

    ## ORIGINAL UNCHANGED
    # what coordinates will the static image be placed:
    where = 200, 200
    # draw surf to screen and catch the rect that blit returns
    blittedRect = screen.blit(ball, where)
    screen.blit(background, (0, 0))

    ## ROTATED
    # get center of surf for later
    oldCenter = blittedRect.center
    # rotate surf by DEGREE amount degrees
    rotatedSurf = pygame.transform.rotate(ball, degree)
    # get the rect of the rotated surf and set its center to the oldCenter
    rotRect = rotatedSurf.get_rect()
    rotRect.center = oldCenter
    # draw rotatedSurf with the corrected rect so it gets put in the proper spot
    screen.blit(rotatedSurf, rotRect)

    degree -= 5
    if degree < -90:
        degree = 0

    # show the screen surface
    pygame.display.flip()
    pygame.time.wait(60)

pygame.quit()
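The center-preserving rotation above is the standard pygame idiom; it can be factored into a reusable helper:

def rot_center(image, angle, center):
    # Rotate a surface and return it with a rect re-centred on `center`,
    # so the image spins in place instead of drifting.
    rotated = pygame.transform.rotate(image, angle)
    return rotated, rotated.get_rect(center=center)

# usage: surf, rect = rot_center(ball, degree, (200, 200)); screen.blit(surf, rect)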
Soupupup/pythonsensorgame
rotation test 2.py
rotation test 2.py
py
1,287
python
en
code
0
github-code
6
31016470858
# coding=utf-8 import document import shape from db import error, Trae_Fila, lee, rg_vacio, gpx, cl, copia_rg, FDC, GS_INS, u_libre, Busca_Prox, Fecha, i_selec, \ Abre_Aplicacion, Abre_Empresa from db import Int, Num, lista, Num_aFecha, lee_dc from aa_funciones import Serie, GetColumnasACC_CK, LineaToDc from shape import TextBox def CertificadoSAT(partes, args, columnas_checks=GetColumnasACC_CK(), LineaToDc=LineaToDc, Serie=Serie, cl=cl, gpx=gpx): #from gsWord import document, shape import os u_libres = {} def Posicion(campo, arp, cl=cl, gpx=gpx): fila = Trae_Fila(FDC[gpx[1]][arp][2], campo, clr=-2) fmt = Trae_Fila(FDC[gpx[1]][arp][2], campo, clr=2) if fila is None: error(cl, "No existe el campo '%s' en el diccionario '%s'." % (campo, arp)) return fila, fmt def merge_vars(text, dc, cl=cl): for key in dc.keys(): if key not in text: continue if type(dc[key]) == list: text = text.split(' ') for k in range(len(text)): if text[k].startswith(key): text[k] = text[k].split('/') if len(text[k]) == 1: text[k] = str(dc[key]) elif len(text[k]) == 2: v = '' if Int(text[k][1]) < len(dc[key]): v = str(dc[key][Int(text[k][1])]) text[k] = v text[k] = str(dc[key][Int(text[k][1])]) elif len(text[k]) == 3: v = '' if Int(text[k][1]) < len(dc[key]) and Int(text[k][2]) < len(dc[key][Int(text[k][1])]): v = str(dc[key][Int(text[k][1])][Int(text[k][2])]) text[k] = v text = ' '.join(text) else: val = dc[key] if val is None: val = '' if hasattr(dc[key], 'keys') and 'IDX' in dc[key].keys(): val = dc[key]['IDX'] text = text.replace(key, val) return text ## def GetAcciones(cd_parte, parte, idioma, columnas_checks=columnas_checks, _LineaToDc=LineaToDc, cl=cl, gpx=gpx): acciones_parte = i_selec(cl, gpx, 'acciones', 'ACC_NPAR', cd_parte, cd_parte) _checks = {} _contratos_checks = {} _tecnicos = [] dc_tecnicos = {} _observaciones = {} _tipo_elementos = [] arts = {} periodicidad = '' first_action = {} tipo_parte = parte['PT_TIPO']['IDX'] acciones_parte=[] c='016232' while c < '016255': if c=='016250': c='016251' continue acciones_parte.append(c) c=Busca_Prox(c) for cd_accion in acciones_parte: rg_accion = lee_dc(lee_dc, gpx, 'acciones', cd_accion) if not periodicidad: periodicidad = rg_accion['ACC_TPER'] if rg_accion['ACC_TPER'] == 'M': periodicidad = rg_accion['ACC_TPER'] cdar = rg_accion['ACC_CDAR']['IDX'] if cdar not in arts.keys(): arts[cdar] = lee_dc(lee_dc, gpx, 'articulos', cdar, rels='') articulo = arts[cdar] if articulo['AR_TIPO'] != 'CC': deno_n1 = '' deno_n2 = '' for ln in FDC[gpx[1]]['articulos'][2]: campo, descripcion, tipo, cols, relaciones = ln[:5] if campo == idioma['PTCI_N1EI']: if relaciones: r = lee_dc(lee_dc, gpx, relaciones, articulo[campo]) deno_n1 = r.get(FDC[gpx[1]][relaciones][2][0][0], '') else: deno_n1 = '' if campo == idioma['PTCI_N2EI']: if relaciones: r = lee_dc(lee_dc, gpx, relaciones, articulo[campo]) try: deno_n2 = r.get(FDC[gpx[1]][relaciones][2][0][0], '') except: error(cl, (FDC[gpx[1]][relaciones][2][0][0], r)) else: deno_n2 = '' if deno_n1: fila_n1 = Trae_Fila(_tipo_elementos, deno_n1, clb=0, clr=-2) if fila_n1 is None: fila_n1 = len(_tipo_elementos) _tipo_elementos.append([deno_n1, []]) if deno_n2 and deno_n2 not in _tipo_elementos[fila_n1][1]: _tipo_elementos[fila_n1][1].append(deno_n2) tx_ = '' if rg_accion['ACC_NSER']: tx_ = idioma['PTCI_NS'] + ' ' + rg_accion['ACC_NSER'] + ' - ' id_ns_ = cdar + rg_accion['ACC_MAR']['IDX'] + rg_accion['ACC_NSER'] cd_numero_serie = '%s%s (%s)' % (tx_, articulo['AR_DENO'], id_ns_) normativa = '' # Se acumula por normativa if articulo['AR_TIPO'] == 'CC': 
# Si no hay check del elemento se busca el check del contrato cd_check = rg_accion['ACC_CKGC'] if cd_check: rg_check = lee_dc(lee_dc, gpx, 'contratos_checklists', cd_check) if rg_check == 1: error(cl, "No existe el checklist del contrato '%s'." % cd_check) normativa = rg_check['CCK_NOR']['IDX'] else: cd_check = rg_accion['ACC_CKG']['IDX'] # Si hay check del elemento if cd_check: rg_check = lee_dc(lee_dc, gpx, 'checklists', cd_check) if rg_check == 1: error(cl, "No existe el checklist '%s'." % cd_check) normativa = rg_check['CK_NOR']['IDX'] if normativa: rg_normativa = lee_dc(lee_dc, gpx, 'normativa', normativa) if rg_normativa == 1: error(cl, "No existe la normativa '%s'." % normativa) else: rg_normativa = {} _texto_antes, _texto_desupes, _tabla_resumen = '', '', '' oficial = 'S' for _linea in rg_normativa.get('NOR_CERT', []): if _linea[0] != tipo_parte or _linea[1] != oficial: continue _texto_antes, _texto_desupes, _tabla_resumen = _linea[2:5] '''_clave = (rg_normativa.get('NOR_NOR', 0), rg_normativa.get('NOR_CAP', 0), normativa, rg_normativa.get('NOR_TITNOR', {}).get('NORC_DENO', ''), rg_normativa.get('NOR_IMP', ''), _texto_antes, _texto_desupes)''' _clave = (rg_normativa.get('NOR_NOR', 0), rg_normativa.get('NOR_CAP', 0),) # print _clave if _clave not in _checks.keys(): _checks[_clave] = {'BOLEANOS': [], 'OTROS': [], 'RESUMEN': { 'horizontal': False, 'titulos': [], 'anchos': [], 'h_align': [], 'texto': [], 'lineas': [] }, 'DATA': [normativa, rg_normativa.get('NOR_TITNOR', {}).get('NORC_DENO', ''), rg_normativa.get('NOR_IMP', ''), _texto_antes, _texto_desupes] } if _tabla_resumen: td = eval(_tabla_resumen) if len(td) == 2: ls_tabla, horizontal = td texto_antes_tabla = '' elif len(td) == 3: ls_tabla, horizontal, texto_antes_tabla = td else: error(cl, "Nº de argumentos incorectos en la tabla de resumen") _checks[_clave]['RESUMEN']['horizontal'] = horizontal == 'S' _checks[_clave]['RESUMEN']['texto'] = texto_antes_tabla ln_tabla = [] for ln_ in ls_tabla: col_name, diccionario, nombre_campo, columna, relacion, ancho, columna_filtrar, _idx_filtrar = ln_ if diccionario == 'acciones': pos, fmt = Posicion(nombre_campo, diccionario) value = rg_accion[nombre_campo] if _clave not in first_action.keys(): first_action[_clave] = True if first_action[_clave]: _checks[_clave]['RESUMEN']['anchos'].append(str(ancho) + '%') _checks[_clave]['RESUMEN']['titulos'].append(col_name) if columna_filtrar: fmt = fmt.split(' ') value = Trae_Fila(value, _idx_filtrar, clb=Int(columna_filtrar), clr=Int(columna)) if value is None: value = '' fmt = fmt[Int(columna)] h_align = 'j' if fmt == 'd': h_align = 'c' value = Num_aFecha(value) if value is None: value = '' elif fmt in 'i012345': h_align = 'r' if first_action[_clave]: _checks[_clave]['RESUMEN']['h_align'].append(h_align) if type(value) == dict: value = value['IDX'] ln_tabla.append(value) _checks[_clave]['RESUMEN']['lineas'].append(ln_tabla) if _clave not in _observaciones.keys(): _observaciones[_clave] = [] key = '' grupo = '' for k in range(len(rg_accion['ACC_CK'])): dc_check = _LineaToDc(rg_accion['ACC_CK'][k], columnas_checks) pregunta = dc_check['descripcion'] respuesta = dc_check['respuesta'] padre = dc_check['padre'] numero = dc_check['numero'] tipo = dc_check['tipo'] if padre: fila_ns = None if grupo == 'BOLEANOS': fila_padre = Trae_Fila(_checks[_clave][grupo], padre, clb=0, clr=-2) if fila_padre is None: fila_padre = len(_checks[_clave][grupo]) _checks[_clave][grupo].append(copia_rg(linea)) else: fila_ns = Trae_Fila(_checks[_clave][grupo], cd_numero_serie, 
clb=0, clr=-2) if fila_ns is None: fila_ns = len(_checks[_clave][grupo]) _checks[_clave][grupo].append([cd_numero_serie, []]) fila_padre = Trae_Fila(_checks[_clave][grupo][fila_ns][1], padre, clb=0, clr=-2) if fila_padre is None: fila_padre = len(_checks[_clave][grupo][fila_ns][1]) _checks[_clave][grupo][fila_ns][1].append(copia_rg(linea)) # error(cl, (linea, fila_padre, checks[clave][grupo][fila_padre])) '''if grupo == 'BOLEANOS': _checks[_clave][grupo][fila_padre][1] += ' ' + pregunta + ' ' + respuesta else: a = _checks[_clave][grupo] _checks[_clave][grupo][fila_ns][1][fila_padre][1] += '\n' + pregunta + ' ' + respuesta''' if grupo == 'BOLEANOS': _checks[_clave][grupo][fila_padre][1] += '\n' + pregunta + ' ' + respuesta else: a = _checks[_clave][grupo] _checks[_clave][grupo][fila_ns][1][fila_padre][1] += '\n' + pregunta + ' ' + respuesta continue grupo = 'OTROS' linea = [numero, pregunta, respuesta] if tipo in 'XB': grupo = 'BOLEANOS' linea = [numero, pregunta, 0, 0, 0] if grupo == 'BOLEANOS': fila = Trae_Fila(_checks[_clave][grupo], numero, clb=0, clr=-2) if fila is None: fila = len(_checks[_clave][grupo]) _checks[_clave][grupo].append(linea) if respuesta == 'SI': _checks[_clave][grupo][fila][2] += 1 elif respuesta == 'N/A': _checks[_clave][grupo][fila][3] += 1 elif respuesta == 'NO': _checks[_clave][grupo][fila][4] += 1 else: fila = Trae_Fila(_checks[_clave][grupo], cd_numero_serie, clb=0, clr=-2) if fila is None: fila = len(_checks[_clave][grupo]) _checks[_clave][grupo].append([cd_numero_serie, []]) _checks[_clave][grupo][fila][1].append(linea) if rg_accion['ACC_OBS']: if articulo['AR_TIPO'] != 'CC': _observaciones[_clave].append(cd_numero_serie) _observaciones[_clave].append(rg_accion['ACC_OBS']) for ln_tec in rg_accion['ACC_TNCS']: cd_tecnico = ln_tec[0] estado = ln_tec[1] dc_tecnicos[cd_tecnico] = dc_tecnicos.get(cd_tecnico, lee_dc(lee_dc, gpx, 'personal', cd_tecnico)) if dc_tecnicos[cd_tecnico] == 1: error(cl, "No existe el técnico '%s'." % cd_tecnico) rgt = dc_tecnicos[cd_tecnico] if rgt['PE_DENO'] not in _tecnicos: _tecnicos.append(rgt['PE_DENO']) first_action[_clave] = False return [_checks, _contratos_checks, _tecnicos, _observaciones, _tipo_elementos, periodicidad] ## def LeeParte(cd_parte, cl=cl, gpx=gpx): dc = {} parte = lee_dc(lee_dc, gpx, 'partes', cd_parte) cliente = lee_dc(lee_dc, gpx, 'clientes', parte['PT_CCL']['IDX']) if cliente == 1: error(cl, "No existe el cliente '%s'." % parte['PT_CCL']['IDX']) parte.update(cliente) delegacion = lee_dc(lee_dc, gpx, 'delegaciones', parte['PT_CCL']['IDX'] + parte['PT_DEL']['IDX']) if delegacion == 1: error(cl, "No existe la delegación '%s' del cliente '%s'." 
% (parte['PT_DEL']['IDX'], parte['PT_CCL']['IDX'])) parte.update(delegacion) contrato = lee_dc(lee_dc, gpx, 'contratos', parte['PT_CON']['IDX']) ls_partes = i_selec(cl, gpx, 'partes', 'PT_CON', parte['PT_CON']['IDX'], parte['PT_CON']['IDX']) parte.update(contrato) parte['CN_FECU'] = '' if ls_partes: parte_ = lee_dc(cl, gpx, 'partes', ls_partes[-1], rels='n') parte['FECU'] = Num_aFecha(parte_['PT_FECPA']) parte['CN_FEC'] = Num_aFecha(parte['CN_FEC']) tipo_contrato = lee_dc(lee_dc, gpx, 'contratos_clases', parte['PT_CON']['CN_TIPO']) parte.update(tipo_contrato) parte['PT_DIREC/1/2'] = '' if len(parte['PT_DIREC']) > 1: parte['PT_DIREC/1/2'] = parte['PT_DIREC'][1][2] fecr = Num_aFecha(parte['PT_FECR']) if fecr is None: fecr = '' else: fecr = fecr[3:] parte['PT_FECR'] = fecr return [dc, parte] ## def FormateaTablaPrincipal(idioma, tabla, color_primario, color_secundario, header=True): tabla.set_spacing({'after': '60', 'before': '60'}) tabla.get_properties().set_cell_margin( {'start': {'w': '60'}, 'end': {'w': '60'}, 'top': {'w': '30'}, 'bottom': {'w': '30'}}) tabla.set_font_size(idioma['PTCI_FSIZE']) if header: tabla.get_row(0).set_background_colour(color_primario) tabla.get_row(0).set_foreground_colour(color_secundario) tabla.get_row(0).set_font_format('b') for row in tabla.get_rows(): row.get_cell(0).set_font_format('b') for cell in row.get_cells(): cell.get_properties().set_vertical_alignment('center') for p in cell.elements: for t in p.elements: t.set_font(idioma['PTCI_FONT']) ## def FormateaTitulo(idioma, paragraph): paragraph.set_font_format('b') paragraph.get_properties().set_keep_next(True) paragraph.set_font_size(idioma['PTCI_FSIZE'] + 4) paragraph.set_spacing({'before': '180', 'after': '80'}) for t in paragraph.elements: t.set_font(idioma['PTCI_FONT']) ## def FormateaCapitulo(idioma, paragraph): paragraph.set_font_format('b') paragraph.set_font_size(idioma['PTCI_FSIZE'] + 1) paragraph.set_spacing({'before': '180', 'after': '80'}) for t in paragraph.elements: t.set_font(idioma['PTCI_FONT']) ## def FormateaLista(idioma, paragraph): paragraph.set_font_size(idioma['PTCI_FSIZE']) for t in paragraph.elements: t.set_font(idioma['PTCI_FONT']) ## def FormateaElemento(idioma, paragraph): paragraph.set_font_format('b') paragraph.set_font_size(idioma['PTCI_FSIZE']) paragraph.set_spacing({'before': '120', 'after': '120'}) for t in paragraph.elements: t.set_font(idioma['PTCI_FONT']) ## def Footer(doc_word, idioma, parte, _path_s): footer = doc_word.get_DefaultFooter() #-footer.add_rtf(idioma['PTCI_LOPD']) ## def Header(header, doc_word, idioma, _path_i, _path_d, merge_vars=merge_vars, horizontal=False): num_pag = document.new_page_number(header, idioma['PTCI_SUFPAG']) num_pag.set_text_separator(idioma['PTCI_SEPPAG']) '''p = doc_word.new_paragraph(header) p.set_horizontal_alignment('r')''' paragraph_i = doc_word.new_paragraph(header, None) paragraph_i.AddPicture(header, _path_i, 1800, 1200) if _path_d: paragraph_d = doc_word.new_paragraph(header, None) paragraph_d.AddPicture(header, _path_d, 1800, 1200) table = header.add_table( [ [paragraph_i, None, paragraph_d], [None, num_pag] ], column_width=[2000, 6000, 2000], horizontal_alignment=['l', 'l', 'r'], # borders={'all': {'sz': 4}} ) table.get_row(1).get_cell(0).get_properties().set_grid_span(2) else: table = header.add_table( [ [paragraph_i, None], [None, num_pag] ], column_width=[2000, 8000], horizontal_alignment=['l', 'r'], # borders={'all': {'sz': 4}} ) font_size = idioma['PTCI_FSIZE'] heigth_line = font_size * 20 + 40 for ln_data in 
idioma['PTCI_TITULO']: paragraph = table.get_row(1).get_cell(0).add_paragraph(merge_vars(ln_data, parte), font_format='b') paragraph.set_spacing({'after': '60', 'before': '60', 'line': heigth_line}) paragraph.set_font_size(font_size + 2) for t in paragraph.elements: t.set_font(idioma['PTCI_FONT']) table.get_row(1).get_cell(0).set_font_format('b') table.get_row(1).get_cell(0).set_font_size(font_size + 4) table.get_row(1).get_cell(0).get_properties().set_table_cell_width(20000) table.get_row(1).get_cell(1).add_paragraph( merge_vars('Nº Certificado ' + parte['PT_NUMCERT'], parte), horizontal_alignment='r', font_size=font_size + 1, font_format='b' ) ls = [] for line in idioma['PTCI_HEAD']: ha = 'l' if not _path_d: ha = 'r' paragraph = table.get_row(0).get_cell(1).add_paragraph(line[0], horizontal_alignment=ha, font_format='b') paragraph.set_spacing({'after': '60', 'before': '60', 'line': heigth_line}) paragraph.set_font_size(font_size) for t in paragraph.elements: t.set_font(idioma['PTCI_FONT']) pos = (-5013, 8984) size = (10946, 428) if horizontal: pos = (-2700, 6120) size = (6800, 650) reg_mercantil = shape.TextBox( header, text=idioma['PTCI_REGM'], size=size, rotation=270, r_position=[ {'orientation': 'horizontal', 'position': pos[0], 'relative': 'page'}, {'orientation': 'vertical', 'position': pos[1], 'relative': 'page'} ], background_color='FFFFFF', horizontal_alignment='c', font_format='b', font_size=8 ) p = header.add_paragraph(reg_mercantil) p.set_spacing({'after': '0', 'before': '0', 'line': '10'}) ## def firma_sello(doc_word, body, _path_s, parte, idioma): ls = [] font_size = idioma['PTCI_FSIZE'] body.add_paragraph('') tec_firma = parte['PT_TCF']['IDX'] deno_tec = parte['PT_TCF'].get('PE_DENO', '') deno_tec += ' ' + parte['PT_TCF'].get('PE_DNI', '') deno_tec += ' ' + parte['PT_TCF'].get('PE_NCOL', '') sello = doc_word.new_image(body, _path_s, 1500, 1000, anchor='inline', horizontal_alignment='c') d_sello = '' # doc_word.new_image(body, _path_s, 1500, 1000, anchor='inline', horizontal_alignment='c') representante = parte['PT_REPR'] table_sellos = doc_word.new_table( body, [ [idioma['PTCI_TITSEL'], '', idioma['PTCI_TITCLI']], [sello, '', d_sello], [idioma['PTCI_PIESEL'] + deno_tec, '', idioma['PTCI_PIECLI'] + representante] ], horizontal_alignment=['c', 'c', 'c'], column_width=[3000, 4000, 3000] ) table_sellos.set_font_format({(0,): 'b', (2,): 'b'}) table_sellos.get_row(0).set_font_size(font_size) table_sellos.get_row(2).set_font_size(font_size) table_bloc = body.add_table( [[table_sellos]], horizontal_alignment=['c'], column_width=[10000] ) table_bloc.get_row(0).get_properties().cantSplit = True # TODO cargar firma técnico img_f = 1 # lee(cl,gpx,'imagenes-t',tec_firma) if img_f not in [1, '']: _path_f = path_temp + tec_firma open(_path_f, 'wb').write(img_f) ## def Portada(body, idioma, parte, numero_norma, numero_capitulo, section_margins, FormateaTitulo=FormateaTitulo, merge_vars=merge_vars, FormateaTablaPrincipal=FormateaTablaPrincipal): # Texto inicial if idioma['PTCI_TXTINI']: #-body.add_rtf(merge_vars(idioma['PTCI_TXTINI'], parte)) numero_norma += 1 # Apartado 1 titulo_elemento = '%d. 
%s' % (numero_norma, merge_vars(idioma['PTCI_TITDP'], parte)) numero_norma += 1 paragraph_1 = body.add_paragraph(titulo_elemento) FormateaTitulo(idioma, paragraph_1) grid_span = [] lineas_1 = [] e = [] for i in range(len(idioma['PTCI_DP'])): etiqueta, variable = idioma['PTCI_DP'][i][:2] e.append([i, variable]) if variable: lineas_1.append([merge_vars(etiqueta, parte), merge_vars(variable, parte)]) else: grid_span.append(i) lineas_1.append([merge_vars(etiqueta, parte)]) tabla_1 = body.add_table(lineas_1, column_width=['25%', '75%'], borders=borders) for i in grid_span: tabla_1.get_row(i).get_cell(0).get_properties().set_grid_span(2) tabla_1.get_row(i).get_cell(0).get_properties().set_table_cell_width('100%') FormateaTablaPrincipal(idioma, tabla_1, color_primario, color_secundario, False) # Apartado 2 titulo_elemento = '%d. %s' % (numero_norma, merge_vars(idioma['PTCI_TITDI'], parte)) numero_norma += 1 paragraph_1 = body.add_paragraph(titulo_elemento) FormateaTitulo(idioma, paragraph_1) grid_span = [] lineas_1 = [] for i in range(len(idioma['PTCI_DI'])): etiqueta, variable = idioma['PTCI_DI'][i][:2] if variable: lineas_1.append([merge_vars(etiqueta, parte), merge_vars(variable, parte)]) else: grid_span.append(i) lineas_1.append([merge_vars(etiqueta, parte)]) tabla_1 = body.add_table(lineas_1, column_width=['25%', '75%'], borders=borders) for i in grid_span: tabla_1.get_row(i).get_cell(0).get_properties().set_grid_span(2) tabla_1.get_row(i).get_cell(0).get_properties().set_table_cell_width('100%') FormateaTablaPrincipal(idioma, tabla_1, color_primario, color_secundario, False) _section = body.add_section() _section.SetMargins(section_margins) # Apartado 3 titulo_elementos = '%d. %s' % (numero_norma, merge_vars(idioma['PTCI_TITEI'], parte)) FormateaTitulo(idioma, body.add_paragraph(titulo_elementos)) numero_norma += 1 if idioma['PTCI_TXTEI']: #-body.add_rtf(idioma['PTCI_TXTEI']) pass t = 1 for ln in tipo_elementos: s = 1 deno_grupo, familias = ln[:2] r = ' ' if t > 9: r = '' FormateaLista(idioma, body.add_paragraph('%d. %s%s' % (t, r, deno_grupo))) for deno_familia in familias: FormateaLista(idioma, body.add_paragraph(' %d.%d. %s' % (t, s, deno_familia))) s += 1 t += 1 # Textos contrato for cd_text in parte['PT_CON']['CN_TXT']: body.add_section().SetMargins(section_margins) _text = lee_dc(lee_dc, gpx, 'fcartas', cd_text) _text['FA_TXT'] = merge_vars(_text['FA_TXT'], parte) FormateaTitulo(idioma, body.add_paragraph(str(numero_norma) + '. ' + _text['FA_DENO'])) body.add_rtf(_text['FA_TXT']) numero_norma += 1 lista_checks_c = checks.keys() lista_checks_c.sort() _nor = '' nn = n_capitulo = 1 for ln in lista_checks_c: _norma, _capitulo = ln if _norma > 1: continue cd_normativa, nombre_normativa, nombre_capitulo, _texto_antes, _texto_despues = checks[ln]['DATA'] if not _nor: body.add_section().SetMargins(section_margins) if not nombre_normativa: nombre_normativa = 'DATOS GENERALES' if not _nor or _nor != _norma: _nor = str(numero_norma) + '. ' + nombre_normativa nn = numero_norma numero_norma += 1 n_capitulo = 1 paragraph_elemento = body.add_paragraph(_nor) FormateaTitulo(idioma, paragraph_elemento) paragraph_elemento = body.add_paragraph('%d.%d. 
%s' % (nn, n_capitulo, nombre_capitulo)) FormateaCapitulo(idioma, paragraph_elemento) datos_generales = copia_rg(checks[ln].get('BOLEANOS', [])) otros_datos_generales = copia_rg(checks[ln].get('OTROS', [])) del checks[ln] if _texto_antes: rg_txt = lee_dc(lee_dc, gpx, 'fcartas', _texto_antes) if rg_txt == 1: error(cl, "No existe el texto '%s'" % _texto_antes) txt_antes = rg_txt['FA_TXT'] txt_antes = merge_vars(txt_antes, parte) #-body.add_rtf(txt_antes) if datos_generales: # #$>datos_generales titulo_datos_generales = idioma['PTCI_TBOO'] for j in range(len(datos_generales)): datos_generales[j][0] = "%d.%d.%d" % (nn, n_capitulo, j + 1) if '\n' in datos_generales[j][1]: datos_generales[j][1] = datos_generales[j][1].split('\n') tabla_datos_generales = body.add_table(datos_generales, titulo_datos_generales, column_width=columnas_checks, borders=borders_checks, horizontal_alignment=['r', 'j', 'c', 'c', 'c'] ) FormateaTablaPrincipal(idioma, tabla_datos_generales, color_primario, color_secundario) if otros_datos_generales: if datos_generales: body.add_paragraph('') for i in range(len(otros_datos_generales)): titulo, lineas = otros_datos_generales[i] for j in range(len(lineas)): lineas[j][0] = "%d.%d.%d" % (nn, n_capitulo, j + 1) if '\n' in lineas[j][1]: lineas[j][1] = lineas[j][1].split('\n') _pie = body.add_table(lineas, idioma['PTCI_TNOR'], horizontal_alignment=['r', 'l', 'l'], column_width=columnas_others, borders=borders_checks) FormateaTablaPrincipal(idioma, _pie, color_primario, color_secundario) n_capitulo += 1 if _texto_despues: body.add_paragraph('') rg_txt = lee_dc(lee_dc, gpx, 'fcartas', _texto_despues) if rg_txt == 1: error(cl, "No existe el texto '%s'" % _texto_despues) txt_despues = rg_txt['FA_TXT'] txt_despues = merge_vars(txt_despues, parte) #-body.add_rtf(txt_despues) else: body.add_paragraph('') if not dc_observaciones.get(ln, []): observaciones = [''] else: observaciones = [''] * len(dc_observaciones[ln]) _tabla_obs = body.add_table(observaciones, [idioma['PTCI_OBS']], horizontal_alignment=['j'], column_width=['100%'], borders=borders_obs ) for i in range(len(dc_observaciones.get(ln, []))): _tabla_obs.get_row(i + 1).get_cell(0).elements = [] if dc_observaciones[ln][i].startswith('{\\rtf'): #-_tabla_obs.get_row(i + 1).get_cell(0).add_rtf(dc_observaciones[ln][i]) pass else: _tabla_obs.get_row(i + 1).get_cell(0).add_paragraph(dc_observaciones[ln][i]) FormateaTablaPrincipal(idioma, _tabla_obs, color_primario, color_secundario) if otros_datos_generales and datos_generales: firma_sello(doc_word, body, _path_s, parte, idioma) return numero_norma, numero_capitulo ## idioma = lee_dc(lee_dc, gpx, 'partes_certificado_idioma', '000') path_temp = GS_INS + '/temp/' if not os.path.exists(path_temp): os.makedirs(path_temp) parametros = lee_dc(lee_dc, gpx, 'parametros', '0') # Se recuperan los colores para las cabeceras de la tabla color = lista(parametros['P_COLPRI'], ':', 3) rgb = (Int(color[0]), Int(color[1]), Int(color[2])) color_primario = '%02x%02x%02x' % rgb if not parametros['P_COLSEC']: parametros['P_COLSEC'] = '255:255:255' color = lista(parametros['P_COLSEC'], ':', 3) rgb = (Int(color[0]), Int(color[1]), Int(color[2])) color_secundario = '%02x%02x%02x' % rgb imd = idioma['PTCI_LOGOD'] _path_d = '' if imd: img_d = lee(cl, gpx, 'imgs_v', imd) if img_d == 1: error(cl, "No existe el logo " + imd) _path_d = path_temp + imd open(_path_d, 'wb').write(img_d) imi = idioma['PTCI_LOGOI'] img_i = lee(cl, gpx, 'imgs_v', imi) if img_i == 1: error(cl, "No existe el logo " + imi) _path_i = path_temp 
+ imi open(_path_i, 'wb').write(img_i) im_sello = idioma['PTCI_IMGSEL'] img_sello = lee(cl, gpx, 'imgs_v', im_sello) if img_sello == 1: error(cl, "No existe el logo " + im_sello) _path_s = path_temp + im_sello open(_path_s, 'wb').write(img_sello) accion = args.get('accion', '') impresora = args.get('desti', '') borders_checks = {'all': {'sz': 4, 'color': color_primario, 'space': 0}} borders_obs = copia_rg(borders_checks) borders_obs['insideH'] = {'sz': 0, 'color': color_primario, 'space': 0} columnas_checks = [750, 7450, 600, 600, 600] columnas_others = [750, 7450, 1800] section_margins = {'left': 953, 'right': 953, 'top': 1000, 'header': 600, 'footer': 400} documentos_generados = [] borders = {'all': {'sz': 4, 'color': color_primario, 'space': 0}, 'insideV': {'value': 'nil'}} for cd_parte in partes: dc_parte, parte = LeeParte(cd_parte) parte.update(parametros) if parte['PT_NUMCERT'] and False: rgcertificado = lee_dc(lee_dc, gpx, 'partes_certificado', parte['PT_NUMCERT']) documentos_generados.append(rgcertificado['PTC_DOC']) continue serie_cer = parte['PT_SERC']['IDX'] ser_cert_complete = Serie(serie_cer, parte['PT_FECR']) if ser_cert_complete not in u_libres.keys(): u_libres[ser_cert_complete] = u_libre(gpx, 'partes_certificado', ser_cert_complete) idx_cert = u_libres[ser_cert_complete] parte['PT_NUMCERT'] = idx_cert r = GetAcciones(cd_parte, parte, idioma) checks, contratos_checks, tecnicos, dc_observaciones, tipo_elementos, periodicidad_revision = r if periodicidad_revision == 'M': parte['PT_PERIODICIDAD'] = 'TRIMESTRAL' else: parte['PT_PERIODICIDAD'] = 'ANUAL' # r = open('c:/users/jonathan/desktop/data_parte.txt', 'r').read() # dc_parte, parte, checks, tecnicos, observaciones = eval(r) checks_sin_normativa = [] numero_norma = 1 numero_capitulo = 1 for clave in checks.keys(): for i in range(len(checks[clave]['BOLEANOS'])): si, na, no = '', '', '' if checks[clave]['BOLEANOS'][i][4] > 0: si, na, no = '', '', 'X' elif checks[clave]['BOLEANOS'][i][3] > 0: si, na, no = '', 'X', '' elif checks[clave]['BOLEANOS'][i][2] > 0: si, na, no = 'X', '', '' checks[clave]['BOLEANOS'][i][2] = si checks[clave]['BOLEANOS'][i][3] = na checks[clave]['BOLEANOS'][i][4] = no if clave[0] == 0: try: checks_sin_normativa = copia_rg(checks[clave]) del checks[clave] except: pass continue ## # Se crea un documento en blanco f = cd_parte + '.docx' if True: f = cd_parte + Fecha('hms').replace(':', '') + '.docx' doc_word = document.Document(path_temp, f) doc_word.empty_document() # doc_word._debug = True # Body 1- Portada body = doc_word.get_body() section = body.get_active_section() # Modifico los márgenes de la página para hacerlo mas estrecho section.SetMargins(section_margins) # Header header = doc_word.get_DefaultHeader() Header(header, doc_word, idioma, _path_i, _path_d) # footer Footer(doc_word, idioma, parte, _path_s) if idioma['PTCI_TITULO1']: titulo = '%d. %s' % (numero_capitulo, merge_vars(idioma['PTCI_TITULO1'], parte)) numero_capitulo += 1 p_titulo = body.add_paragraph(titulo) FormateaTitulo(idioma, p_titulo) lista_checks = checks.keys() lista_checks.sort() # Se renumeran las preguntas for ln in lista_checks: if ln[0] == numero_norma and ln[1] == numero_capitulo: titulo_elemento = str(numero_capitulo) + '. 
' + ln[3] dc_checks = copia_rg(checks[ln]) numero_norma, numero_capitulo = Portada(body, idioma, parte, numero_norma, numero_capitulo, section_margins) # Body 2- Elementos change_header = False _nor = '' nn = 0 cn = 0 er = [] for ln in lista_checks: # Cada grupo de elementos empezará en una nueva sección y en una nueva página n_norma, n_capitulo = ln if n_norma == 1: continue cd_check_, titulo_norma, titulo_elemento, texto_antes, texto_despues = checks[ln]['DATA'] sect = body.add_section(margin_rigth=953, margin_left=953, margin_footer=400, orient='') if change_header: h = doc_word.new_header(sect) Header(h, doc_word, idioma, _path_i, _path_d) change_header = False if not cn or cn != n_norma: _nor = str(numero_norma) + '. ' + titulo_norma nn = numero_norma cn = n_norma numero_norma += 1 n_capitulo = 1 paragraph_elemento = body.add_paragraph(_nor) FormateaTitulo(idioma, paragraph_elemento) paragraph_1 = body.add_paragraph('%d.%d. %s' % (nn, n_capitulo, titulo_elemento)) FormateaCapitulo(idioma, paragraph_1) if texto_antes: rg_txt = lee_dc(lee_dc, gpx, 'fcartas', texto_antes) if rg_txt == 1: error(cl, "No existe el texto '%s'" % texto_antes) txt_antes = rg_txt['FA_TXT'] #-body.add_rtf(txt_antes) boolean_data = checks[ln].get('BOLEANOS', []) if boolean_data: for i in range(len(boolean_data)): boolean_data[i][0] = "%d.%d.%d" % (nn, numero_capitulo, i + 1) if '\n' in boolean_data[i][1]: boolean_data[i][1] = boolean_data[i][1].split('\n') # #$>boolean_data encabezado = body.add_table(boolean_data, idioma['PTCI_TBOO'], horizontal_alignment=['r', 'l', 'c', 'c', 'c'], column_width=columnas_checks, borders=borders_checks) FormateaTablaPrincipal(idioma, encabezado, color_primario, color_secundario) body.add_paragraph('') other_data = checks[ln].get('OTROS', []) if other_data: for i in range(len(other_data)): titulo, lineas = other_data[i] p_od = body.add_paragraph(titulo) FormateaElemento(idioma, p_od) p_od.get_properties().set_keep_next(True) for j in range(len(lineas)): lineas[j][0] = "%d.%d.%d" % (nn, numero_capitulo, j + 1) if '\n' in lineas[j][1]: lineas[j][1] = lineas[j][1].split('\n') pie = body.add_table(lineas, idioma['PTCI_TNOR'], horizontal_alignment=['r', 'l', 'l'], column_width=columnas_others, borders=borders_checks) FormateaTablaPrincipal(idioma, pie, color_primario, color_secundario) n_capitulo += 1 body.add_paragraph('') numero_capitulo += 1 if not dc_observaciones.get(ln, []): observaciones = [''] else: observaciones = [''] * len(dc_observaciones[ln]) tabla_obs = body.add_table( observaciones, [idioma['PTCI_OBS']], horizontal_alignment=['j'], column_width=['100%'], borders=borders_obs ) for i in range(len(dc_observaciones.get(ln, []))): # tabla_obs.get_row(i+1).get_cell(0).elements=[] if dc_observaciones[ln][i].startswith('{\\rtf'): pass #-tabla_obs.get_row(i + 1).get_cell(0).add_rtf(dc_observaciones[ln][i]) else: tabla_obs.get_row(i + 1).get_cell(0).add_paragraph(dc_observaciones[ln][i]) FormateaTablaPrincipal(idioma, tabla_obs, color_primario, color_secundario) if texto_despues: body.add_paragraph('') rg_txt = lee_dc(lee_dc, gpx, 'fcartas', texto_despues) if rg_txt == 1: error(cl, "No existe el texto '%s'" % texto_despues) txt_despues = rg_txt['FA_TXT'] #-body.add_rtf(txt_despues) #firma_sello(doc_word, body, _path_s, parte, idioma) tabla_resumen = checks[ln].get('RESUMEN', {}) if tabla_resumen.get('lineas', []): imprimir_horizontal = tabla_resumen['horizontal'] orient = '' ml = 953 if imprimir_horizontal: orient = 'landscape' ml = 1253 body.add_section(margin_rigth=953, 
margin_left=ml, margin_footer=400, orient=orient) new_section = body.get_active_section() if imprimir_horizontal: h = doc_word.new_header(new_section) Header(h, doc_word, idioma, _path_i, _path_d, horizontal=True) change_header = True v = new_section.get_width() - new_section.get_margin_rigth() - new_section.get_margin_left() if tabla_resumen.get('texto', ''): #-body.add_rtf(tabla_resumen['texto']) pass titulo_tabla = tabla_resumen['titulos'] anchos_tabla = tabla_resumen['anchos'] for i in range(len(anchos_tabla)): anchos_tabla[i] = int(v * (Num(anchos_tabla[i].replace('%', '')) / 100.)) h_align_tabla = tabla_resumen['h_align'] lineas_tabla = tabla_resumen['lineas'] tabla_res = body.add_table(lineas_tabla, titulo_tabla, column_width=anchos_tabla, borders=borders_checks) FormateaTablaPrincipal(idioma, tabla_res, color_primario, color_secundario) doc_word.set_variables({}) doc_word.save() '''file_ = open(path_temp + f, 'rb').read() documentos_generados.append(file_) rg_certificado = rg_vacio(gpx[1], 'partes_certificado') rg_certificado < PTC_NPAR > = cd_parte rg_certificado < PTC_DOC > = file_ serie_cer = parte['PT_SERC']['IDX'] ser_cert_complete = Serie(serie_cer, parte['PT_FECR']) if ser_cert_complete not in u_libres.keys(): u_libres[ser_cert_complete] = u_libre(gpx, 'partes_certificado', ser_cert_complete) idx_cert = u_libres[ser_cert_complete] rgparte = lee_dc(None, gpx, 'partes', cd_parte, rels='') rgparte['PT_NUMCERT'] = idx_cert # p_actu(cl, gpx, 'partes', cd_parte, rgparte, a_graba='', log='') u_libres[ser_cert_complete] = Busca_Prox(u_libres[ser_cert_complete])''' # p_actu(cl, gpx, 'partes_certificado', idx_cert, rg_certificado, a_graba='', log='') #del file_ os.system('start ' + path_temp + f) return documentos_generados if __name__ == '__main__': Abre_Aplicacion('sat') Abre_Empresa(gpx[0], gpx[1], gpx[2]) CertificadoSAT(['0000004555'], {})
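# --- Illustrative aside (not from the original file): a minimal standalone
# sketch of the SI / N/A / NO flattening applied above to each
# checks[clave]['BOLEANOS'] row: [num, question, si, na, no] counters are
# collapsed to one 'X' mark, NO taking precedence over N/A, and N/A over SI.
def flatten_boolean_rows(rows):
    for row in rows:
        si, na, no = '', '', ''
        if row[4] > 0:        # any explicit NO wins
            no = 'X'
        elif row[3] > 0:      # otherwise any N/A
            na = 'X'
        elif row[2] > 0:      # otherwise SI
            si = 'X'
        row[2], row[3], row[4] = si, na, no
    return rows

assert flatten_boolean_rows([[1, 'q', 2, 1, 0]])[0][2:] == ['', 'X', '']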
JonathanServiaMandome/gsWord
utilities/certificate.py
certificate.py
py
36,095
python
es
code
0
github-code
6
74348094589
from collections import namedtuple, defaultdict from itertools import combinations, product from math import sqrt from typing import List INPUTTEST = 'inputtest.txt' INPUTREAL = 'input.txt' def getLines(fileName): file = open(fileName,'r') lines = file.read().splitlines() file.close() return lines class Point3(namedtuple('Point', 'x y z')): def __repr__(self): return f'{self.x},{self.y},{self.z}' # performed x y z rotations in a loop and stored in a set rotations = [([2, 0, 1], [-1, -1, 1]), ([0, 1, 2], [1, -1, -1]), ([2, 1, 0], [-1, -1, -1]), ([2, 1, 0], [1, -1, 1]), ([0, 2, 1], [-1, -1, -1]), ([1, 2, 0], [1, -1, -1]), ([1, 0, 2], [-1, -1, -1]), ([1, 2, 0], [1, 1, 1]), ([0, 2, 1], [-1, 1, 1]), ([0, 1, 2], [-1, 1, -1]), ([0, 2, 1], [1, -1, 1]), ([2, 0, 1], [-1, 1, -1]), ([1, 0, 2], [1, 1, -1]), ([2, 1, 0], [1, 1, -1]), ([2, 0, 1], [1, 1, 1]), ([2, 1, 0], [-1, 1, 1]), ([0, 1, 2], [1, 1, 1]), ([1, 0, 2], [1, -1, 1]), ([1, 0, 2], [-1, 1, 1]), ([0, 1, 2], [-1, -1, 1]), ([1, 2, 0], [-1, 1, -1]), ([1, 2, 0], [-1, -1, 1]), ([0, 2, 1], [1, 1, -1]), ([2, 0, 1], [1, -1, -1])] def formatInput(lines): scanners = {} scannerCurrent = [] i = 0 lines.append("") for line in filter(lambda x: not x.startswith("---"), lines): if len(line) == 0 and len(scannerCurrent) > 0: scanners[i] = scannerCurrent scannerCurrent = [] i += 1 else: scannerCurrent.append([int(t) for t in line.split(",")]) return scanners def partOne(scanners): intersects = getIntersects(scanners) mapping_dict = createMappings(intersects, scanners) beacons = set(toPoint(p) for p in scanners[0]) used_mappings = set() transformed_scanners = {0} scanner_origins = [[0, 0, 0]] while len(transformed_scanners) < len(scanners): queue = [k for k in mapping_dict.keys() if k[0] in transformed_scanners and k[1] not in transformed_scanners] while len(queue) > 0: el = queue.pop() if el[1] in transformed_scanners: continue p_transpose = list(zip(*scanners[el[1]])) centroid = list(zip([0, 0, 0])) # origin relative to scanner itself is 0, 0, 0 use_mapping = el while True: centroid = transform(centroid, *mapping_dict[use_mapping]) p_transpose = transform(p_transpose, *mapping_dict[use_mapping]) new_points = set(toPoint(p) for p in zip(*p_transpose)) if use_mapping[0] == 0: break for mapping in used_mappings: if mapping[1] == use_mapping[0]: use_mapping = mapping break scanner_origins.append([centroid[0][0], centroid[1][0], centroid[2][0]]) transformed_scanners.add(el[1]) beacons.update(new_points) used_mappings.add(el) return len(beacons), scanner_origins def partTwo(scanner_origins): return max(sum(map(lambda x: abs(x[0] - x[1]), zip(*p))) for p in combinations(scanner_origins, 2)) def createMappings(intersects, scanners): mappingDict = {} for i in intersects: pointToDistA = defaultdict(set) for p in combinations(scanners[i[0]], 2): dist = euclidDist(*p) pointToDistA[toPoint(p[0])].add(dist) pointToDistA[toPoint(p[1])].add(dist) pointToDistB = defaultdict(set) for p in combinations(scanners[i[1]], 2): dist = euclidDist(*p) pointToDistB[toPoint(p[0])].add(dist) pointToDistB[toPoint(p[1])].add(dist) pointsA = [] pointsB = [] for p in product(pointToDistA.keys(), pointToDistB.keys()): intersect = pointToDistA[p[0]].intersection(pointToDistB[p[1]]) if len(intersect) >= 11: # 12 common beacons 1 src and 11 dst for distance pointsA.append(pointToList(p[0])) pointsB.append(pointToList(p[1])) mappingDict[i] = mapScannerAToB(pointsA, pointsB) return mappingDict def mapScannerAToB(pointsA, pointsB): aTranspose = list(zip(*pointsA)) bTranspose = list(zip(*pointsB)) for 
perms, signs in rotations: rotated = rotate(bTranspose, perms, signs) offset = [] for p in zip(rotated, aTranspose): points = set([x[1] - x[0] for x in zip(p[0], p[1])]) if len(points) == 1: offset.append(points.pop()) if len(offset) == 3: return offset, perms, signs return None def transform(itemToTransform, centerOfTarget, transformPerm, transformSign): rotated = rotate(itemToTransform, transformPerm, transformSign) return [list(map(lambda x: centerOfTarget[i] + x, p)) for i, p in enumerate(rotated)] def getIntersects(scanners): intersections = [] distDict = {i: set(euclidDist(*p) for p in combinations(scanners[i], 2)) for i in scanners.keys()} for i in combinations(range(len(scanners)), 2): if len(distDict[i[0]].intersection(distDict[i[1]])) >= 66: intersections.append(i) intersections.append((i[1], i[0])) return intersections def rotate(point, perms, signs): return map(lambda n: n * signs[0], point[perms[0]]), \ map(lambda n: n * signs[1], point[perms[1]]), \ map(lambda n: n * signs[2], point[perms[2]]) def euclidDist(a, b): return sqrt(sum(map(lambda x: pow(x[0] - x[1], 2), zip(a, b)))) def toPoint(plist): if len(plist) == 3: return Point3(plist[0], plist[1], plist[2]) else: raise Exception("Can't convert to point") def pointToList(p): return [p.x, p.y, p.z] lines = getLines(INPUTREAL) scanners = formatInput(lines) part1, centroids = partOne(scanners) '''Part One''' print(part1) '''Part Two''' print(partTwo(centroids))
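# --- Illustrative aside (not from the original solution): why getIntersects
# tests ">= 66" - two scanners sharing 12 beacons share all C(12, 2) = 66
# pairwise distances, and Euclidean distance is invariant under the 24 axis
# rotations, so comparing distance sets is a cheap overlap pre-check.
# The toy points below are made up for demonstration.
from itertools import combinations
from math import comb, sqrt

def pairwise_distances(points):
    return {sqrt(sum((a - b) ** 2 for a, b in zip(p, q)))
            for p, q in combinations(points, 2)}

shared = [(0, 0, 0), (1, 2, 3), (4, 5, 6), (7, 8, 9)]
rotated = [(x, -z, y) for x, y, z in shared]  # one of the 24 rotations
assert pairwise_distances(shared) == pairwise_distances(rotated)
assert comb(12, 2) == 66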
David-Hatcher/AoC2021
Day 19/Day19.py
Day19.py
py
5,987
python
en
code
1
github-code
6
11299411121
from functools import update_wrapper import logging from .action import FunctionAction from .request import Request from .traject import Traject from .config import Configurable from .settings import SettingSectionContainer from .converter import ConverterRegistry from .predicate import PredicateRegistry from .tween import TweenRegistry from . import generic from reg import Registry as RegRegistry, CachingKeyLookup import venusian from . import compat from .compat import with_metaclass from .implicit import set_implicit from .mount import MountRegistry from .reify import reify from .template import TemplateEngineRegistry COMPONENT_CACHE_SIZE = 5000 ALL_CACHE_SIZE = 5000 FALLBACK_CACHE_SIZE = 5000 class Registry(Configurable, RegRegistry, MountRegistry, PredicateRegistry, ConverterRegistry, TweenRegistry, TemplateEngineRegistry): """A registry holding an application's configuration. """ app = None # app this registry belongs to. set later during scanning def __init__(self, name, bases, testing_config): self.name = name bases = [base.registry for base in bases if hasattr(base, 'registry')] RegRegistry.__init__(self) MountRegistry.__init__(self) PredicateRegistry.__init__(self) Configurable.__init__(self, bases, testing_config) ConverterRegistry.__init__(self) TweenRegistry.__init__(self) TemplateEngineRegistry.__init__(self) self.settings = SettingSectionContainer() self.clear() def actions(self): yield FunctionAction(self, generic.settings), lambda: self.settings def clear(self): """Clear all registrations in this application. """ RegRegistry.clear(self) MountRegistry.clear(self) PredicateRegistry.clear(self) Configurable.clear(self) ConverterRegistry.clear(self) TweenRegistry.clear(self) TemplateEngineRegistry.clear(self) self.traject = Traject() @reify def lookup(self): return CachingKeyLookup( self, COMPONENT_CACHE_SIZE, ALL_CACHE_SIZE, FALLBACK_CACHE_SIZE).lookup() def callback(scanner, name, obj): obj.registry.app = obj scanner.config.configurable(obj.registry) class AppMeta(type): def __new__(cls, name, bases, d): testing_config = d.get('testing_config') d['registry'] = Registry(name, bases, testing_config) result = super(AppMeta, cls).__new__(cls, name, bases, d) venusian.attach(result, callback) return result class App(with_metaclass(AppMeta)): """A Morepath-based application object. You subclass App to create a morepath application class. You can then configure this class using Morepath decorator directives. An application can extend one or more other applications, if desired, by subclassing them. By subclassing App itself, you get the base configuration of the Morepath framework itself. Conflicting configuration within an app is automatically rejected. A subclass app cannot conflict with the apps it is subclassing, however; instead, configuration is overridden. You can turn your app class into a WSGI application by instantiating it. You can then call it with the ``environ`` and ``start_response`` arguments. """ testing_config = None parent = None """The parent in which this app was mounted.""" request_class = Request """The class of the Request to create. Must be a subclass of :class:`morepath.Request`. """ def __init__(self): pass @reify def lookup(self): """Get the :class:`reg.Lookup` for this application. :returns: a :class:`reg.Lookup` instance. """ return self.registry.lookup def set_implicit(self): set_implicit(self.lookup) @reify def traject(self): return self.registry.traject def request(self, environ): """Create a :class:`Request` given WSGI environment for this app. 
:param environ: WSGI environment :returns: :class:`morepath.Request` instance """ return self.request_class(environ, self) def __call__(self, environ, start_response): """This app as a WSGI application. """ request = self.request(environ) response = self.publish(request) return response(environ, start_response) def ancestors(self): """Return iterable of all ancestors of this app. Includes this app itself as the first ancestor, all the way up to the root app in the mount chain. """ app = self while app is not None: yield app app = app.parent @reify def root(self): """The root application. """ return list(self.ancestors())[-1] def child(self, app, **variables): """Get app mounted in this app. Either give it an instance of the app class as the first parameter, or the app class itself (or name under which it was mounted) as the first parameter and as ``variables`` the parameters that go to its ``mount`` function. Returns the mounted application object, with its ``parent`` attribute set to this app object, or ``None`` if this application cannot be mounted in this one. """ if isinstance(app, App): result = app # XXX assert that variables is empty # XXX do we need to deal with subclasses of apps? if app.__class__ not in self.registry.mounted: return None else: if isinstance(app, compat.string_types): factory = self.registry.named_mounted.get(app) else: factory = self.registry.mounted.get(app) if factory is None: return None result = factory(**variables) result.parent = self return result def sibling(self, app, **variables): """Get app mounted next to this app. Either give it an instance of the app class as the first parameter, or the app class itself (or name under which it was mounted) as the first parameter and as ``variables`` the parameters that go to its ``mount`` function. Returns the mounted application object, with its ``parent`` attribute set to the same parent as this one, or ``None`` if such a sibling application does not exist. """ parent = self.parent if parent is None: return None return parent.child(app, **variables) @reify def publish(self): # XXX import cycles... from .publish import publish result = publish for tween_factory in reversed(self.registry.sorted_tween_factories()): result = tween_factory(self, result) return result @classmethod def directive(cls, name): """Decorator to register a new directive with this application class. You use this as a class decorator for a :class:`morepath.Directive` subclass:: @App.directive('my_directive') class FooDirective(morepath.Directive): ... This needs to be executed *before* the directive is being used and thus might introduce import dependency issues unlike normal Morepath configuration, so beware! An easy way to make sure that all directives are installed before you use them is to make sure you define them in the same module as where you define the application class that has them. 
""" return DirectiveDirective(cls, name) @classmethod def dotted_name(cls): return '%s.%s' % (cls.__module__, cls.__name__) class DirectiveDirective(object): def __init__(self, cls, name): self.cls = cls self.name = name def __call__(self, directive): directive_name = self.name def method(self, *args, **kw): result = directive(self, *args, **kw) result.directive_name = directive_name result.argument_info = args, kw result.logger = logging.getLogger('morepath.directive.%s' % directive_name) return result # this is to help morepath.sphinxext to do the right thing method.actual_directive = directive update_wrapper(method, directive.__init__) setattr(self.cls, self.name, classmethod(method)) return directive
magnus-lycka/morepath
morepath/app.py
app.py
py
8,665
python
en
code
null
github-code
6
30666636404
import sys import xbmc, xbmcgui import torrentitem #enable localization getLS = sys.modules[ "__main__" ].__language__ __cwd__ = sys.modules[ "__main__" ].__cwd__ # Actions ids ACTION_PARENT_DIR = 9 ACTION_PREVIOUS_MENU = 10 ACTION_MOVE_LEFT = 1 ACTION_MOVE_RIGHT = 2 ACTION_MOVE_UP = 3 ACTION_MOVE_DOWN = 4 ACTION_NAV_BACK = 92 def log(txt): xbmc.log(msg = "[SCRIPT] 'torrentrss.trackergui': %s" % (txt, ), level=xbmc.LOGDEBUG) ########################################## class TrackerGUI(xbmcgui.WindowXML): "GUI for tracker search results listing" # Declare this to handle events before control init control_list_id=-1 def __init__(self, *args, **kwargs): # Init window xbmcgui.WindowXML.__init__(self, *args, **kwargs) self.trackerMgr = kwargs['trackerMgr'] self.filterlist = [] self.loaded = 0 self.results = [] self.busyWindow = xbmcgui.WindowXMLDialog("DialogBusy.xml", __cwd__, "default") def __del__(self): "Destructor" pass def onInit(self): "Secondary constructor called after window xml is loaded" # Init gui self.defineControls() if (self.loaded == 0): # Feed name/id link list self.feedmap = {} self.feedmap['41010'] = getLS(50102) self.feedmap['41011'] = getLS(55101) self.feedmap['41012'] = getLS(55102) self.getControl(self.control_leftFrame_liststart_id).setSelected(True) self.filterlist.append(self.feedmap['41010']) # Clear gui list self.refreshListItem(self.filterlist) self.loaded = 1 def defineControls(self): "Initialize controls and link to gui widgets" # actions self.action_cancel_dialog = (ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, ACTION_NAV_BACK) self.action_up_down = (ACTION_MOVE_UP, ACTION_MOVE_DOWN) # Control ids self.control_list_id = 504 self.control_rightFrame_title_id = 40001 self.control_rightFrame_info_id = 40002 self.control_rightFrame_list_id = 40010 self.control_rightFrame_imgbox_id = 40005 self.control_leftFrame_sortorder_id = 4 self.control_leftFrame_filter_id = 19 self.control_leftFrame_refresh_id = 41004 self.control_leftFrame_sortby_id = 41005 self.control_leftFrame_liststart_id = 41010 self.control_leftFrame_listend_id = 41012 # Controls self.list = self.getControl(self.control_list_id) self.rightFrame_title = self.getControl(self.control_rightFrame_title_id) self.rightFrame_info = self.getControl(self.control_rightFrame_info_id) self.rightFrame_list = self.getControl(self.control_rightFrame_list_id) self.rightFrame_imgbox = self.getControl(self.control_rightFrame_imgbox_id) self.leftFrame_refresh = self.getControl(self.control_leftFrame_refresh_id) self.leftFrame_sortby = self.getControl(self.control_leftFrame_sortby_id) #self.leftFrame_sortorder = self.getControl(self.control_leftFrame_sortorder_id) #self.leftFrame_filter = self.getControl(self.control_leftFrame_filter_id) def showBusyWindow(self): " Display a 'working' window" self.busyWindow = xbmcgui.WindowXMLDialog("DialogBusy.xml", __cwd__, "default") self.busyWindow.show() def closeBusyWindow(self): " Close the 'working' window" self.busyWindow.close() def doModalSearch(self, keywords): # Show busy dialog self.showBusyWindow() # Run a sequential search on all trackers self.results = self.trackerMgr.search(keywords) # Close busy dialog self.closeBusyWindow() # Display results or error self.selectedlink = None if (len(self.results) > 0): self.doModal() else: xbmcgui.Dialog().ok(getLS(56001), getLS(56002), getLS(56003)) return self.selectedlink def refreshListItem(self, filterfeed=None): "Refresh GUI content" # Update feed list self.list.reset() i=0 alltxt=getLS(50102) for entry in self.results: #log(entry) if 
(filterfeed is None) or (alltxt in filterfeed) or (entry.tracker in filterfeed): item = xbmcgui.ListItem(entry.seeds, entry.title) item.setProperty('size', entry.size) item.setProperty('peers', entry.peers) item.setProperty('vip', 'p'+entry.vip) item.setProperty('pos', str(i)) self.list.addItem(item) i=i+1 self.list.selectItem(0) self.onSelected(self.list.getSelectedItem()) def clear(self): "Clear all the feed items after user confirmation" self.list.reset() self.trackerMgr.reset() def onClick(self, controlId): "React on mouse clicks/select key" if controlId == self.control_list_id: item = self.list.getSelectedItem() torrent = self.results[int(item.getProperty('pos'))] self.selectedlink = None if (xbmcgui.Dialog().yesno(torrent.title, getLS(56011))): if (len(torrent.magnet) > 0): self.selectedlink = torrent.magnet elif (len(torrent.torrent) > 0): self.selectedlink = torrent.torrent self.close() elif controlId == self.control_leftFrame_refresh_id: self.showBusyWindow() self.list.reset() self.clearRightFrame() self.results = self.trackerMgr.search(self.trackerMgr.getLastKeywords()) self.refreshListItem(self.filterlist) self.closeBusyWindow() elif controlId >= self.control_leftFrame_liststart_id and controlId <= self.control_leftFrame_listend_id: item = self.getControl(controlId) label = self.feedmap[str(controlId)] if item.isSelected(): self.filterlist.append(label) else: self.filterlist.remove(label) self.refreshListItem(self.filterlist) def onAction(self, action): "React on actions" if (action in self.action_cancel_dialog): self.close() elif (self.getFocusId() == self.control_list_id) and (action in self.action_up_down): item = self.list.getSelectedItem() self.onSelected(item) def onFocus(self, controlId): "React on focus" if controlId == self.control_list_id: item = self.list.getSelectedItem() self.onSelected(item) def onSelected(self, item): "Action when a new list item is selected (custom)" if (item): self.updateRightFrame(item) def updateRightFrame(self, item): "Update the right frame of the gui with given feed info" self.rightFrame_list.reset() srcfeed = self.results[int(item.getProperty('pos'))] self.rightFrame_info.setLabel(srcfeed.title) if srcfeed.trackerlogo is not None: self.rightFrame_imgbox.setImage(srcfeed.trackerlogo) else: self.rightFrame_imgbox.setImage("Fanart_Fallback_Small.jpg") self.rightFrame_list.addItem(xbmcgui.ListItem(getLS(56202), srcfeed.seeds)) self.rightFrame_list.addItem(xbmcgui.ListItem(getLS(56203), srcfeed.peers)) self.rightFrame_list.addItem(xbmcgui.ListItem(getLS(56204), srcfeed.size)) self.rightFrame_list.addItem(xbmcgui.ListItem(getLS(56205), srcfeed.date)) self.rightFrame_list.addItem(xbmcgui.ListItem(getLS(56206), srcfeed.srcurl)) if (srcfeed.vip == "true"): self.rightFrame_list.addItem(xbmcgui.ListItem(getLS(56207), "Yes")) else: self.rightFrame_list.addItem(xbmcgui.ListItem(getLS(56207), "No")) if (len(srcfeed.magnet) > 0): self.rightFrame_list.addItem(xbmcgui.ListItem(getLS(56208), "Yes")) else: self.rightFrame_list.addItem(xbmcgui.ListItem(getLS(56208), "No")) if (len(srcfeed.torrent) > 0): self.rightFrame_list.addItem(xbmcgui.ListItem(getLS(56209), "Yes")) else: self.rightFrame_list.addItem(xbmcgui.ListItem(getLS(56209), "No")) def clearRightFrame(self): "Clear right frame with default values" self.rightFrame_info.setLabel(xbmc.getLocalizedString(416)) self.rightFrame_list.reset() self.rightFrame_imgbox.setImage("Fanart_Fallback_Small.jpg")
vche/script.torrentrss
resources/lib/trackergui.py
trackergui.py
py
7,816
python
en
code
1
github-code
6
650752867
import os import sys import json import unittest import numpy as np import luigi import z5py import cluster_tools.utils.volume_utils as vu from sklearn.metrics import adjusted_rand_score from elf.segmentation.mutex_watershed import mutex_watershed from elf.segmentation.watershed import apply_size_filter try: from ..base import BaseTest except Exception: sys.path.append(os.path.join(os.path.split(__file__)[0], "..")) from base import BaseTest class TestMws(BaseTest): input_key = "volumes/affinities" output_key = "data" offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-2, 0, 0], [0, -3, 0], [0, 0, -3], [-3, 0, 0], [0, -9, 0], [0, 0, -9], [-4, 0, 0], [0, -27, 0], [0, 0, -27]] strides = [4, 12, 12] def _check_result(self, size_filter): # load affs and compare with z5py.File(self.input_path, "r") as f: ds = f[self.input_key] ds.n_threads = 4 affs = vu.normalize(ds[:]) shape = affs.shape[1:] with z5py.File(self.output_path, "r") as f: res = f[self.output_key][:] self.assertEqual(res.shape, shape) exp = mutex_watershed(affs, self.offsets, self.strides) if size_filter > 0: exp, _ = apply_size_filter(exp.astype("uint32"), np.max(affs[:3], axis=0), size_filter) score = adjusted_rand_score(exp.ravel(), res.ravel()) expected_score = 0.1 self.assertLess(1. - score, expected_score) def test_mws(self): from cluster_tools.mutex_watershed import MwsWorkflow config = MwsWorkflow.get_config()["mws_blocks"] config["strides"] = self.strides size_filter = config["size_filter"] with open(os.path.join(self.config_folder, "mws_blocks.config"), "w") as f: json.dump(config, f) task = MwsWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_folder, max_jobs=self.max_jobs, target=self.target, input_path=self.input_path, input_key=self.input_key, output_path=self.output_path, output_key=self.output_key, offsets=self.offsets) ret = luigi.build([task], local_scheduler=True) self.assertTrue(ret) self._check_result(size_filter) if __name__ == "__main__": unittest.main()
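# --- Illustrative aside (not from the original test): adjusted_rand_score,
# used in _check_result above, is invariant to label permutation, which is
# what a segmentation comparison needs (the same regions may get different
# ids on different runs). Tiny self-contained example:
from sklearn.metrics import adjusted_rand_score

labels_a = [0, 0, 1, 1, 2, 2]
labels_b = [5, 5, 9, 9, 7, 7]  # same partition, relabeled
assert adjusted_rand_score(labels_a, labels_b) == 1.0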
constantinpape/cluster_tools
test/mutex_watershed/test_mws.py
test_mws.py
py
2,409
python
en
code
32
github-code
6
27009005628
from scipy.io import loadmat import numpy as np import xlrd as x import pandas as pd def run(file, delimiter): file_name = file["file"] file_type_list = file_name.split(".") file_type = file_type_list[-1] if file_type == 'mat': key = file["key"] array = read_mat(file_name, key) elif file_type == 'csv': array = read_csv(file_name, delimiter) elif file_type == 'txt': array = read_csv(file_name, delimiter) elif file_type == 'xlsx': array = read_xls(file_name) elif file_type == 'xls': array = read_xls(file_name) else: array = np.array([[]]) return {"array": array.tolist()} def read_mat(file, matKey): mat_dict = loadmat(file) return mat_dict[matKey] def read_csv(file, delimiter): array = np.loadtxt(file, delimiter=delimiter) return np.array(array, dtype=float) def read_xls(file): array = pd.read_excel(file, sheet_name=0, header=None) return np.array(array, dtype=float)
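# --- Illustrative aside (not from the original module): the if/elif chain in
# run() dispatches on the file extension; a table-driven variant keeps the
# extension-to-reader mapping in one place. Names below are hypothetical.
import os

def pick_reader(file_name, readers, default=None):
    ext = os.path.splitext(file_name)[1].lstrip('.').lower()
    return readers.get(ext, default)

readers = {'mat': 'read_mat', 'csv': 'read_csv', 'txt': 'read_csv',
           'xlsx': 'read_xls', 'xls': 'read_xls'}
assert pick_reader('data.XLSX', readers) == 'read_xls'
assert pick_reader('data.bin', readers, 'unsupported') == 'unsupported'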
lisunshine1234/mlp-algorithm-python
data/read/read/run.py
run.py
py
1,021
python
en
code
0
github-code
6
35610337121
import cv2 import os cam = cv2.VideoCapture("video.avi") values = [] def discriminator(frame): return frame[0][0][1] != 253 # Read each frame. Use discriminator on each frame to output a zero or one. while True: ret, f = cam.read() if not ret: break values.append( discriminator(f) ) ones_and_zeroes = "".join(str(int(x)) for x in values) byte_string = int(ones_and_zeroes, 2).to_bytes(len(ones_and_zeroes) // 8, byteorder='big') os.write(1, byte_string)
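# --- Illustrative aside (not from the original script): the decoder above
# packs one extracted bit per frame into bytes via int(bits, 2).to_bytes.
# A minimal round trip showing the same packing on a known payload:
payload = b'hi'
bits = ''.join(format(byte, '08b') for byte in payload)
packed = int(bits, 2).to_bytes(len(bits) // 8, byteorder='big')
assert packed == payload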
sectalks/sectalks
ctf-solutions/LON0x26/bc/vid.py
vid.py
py
492
python
en
code
277
github-code
6
41087502611
import matplotlib.pyplot as plt import pandas as pd def main(): # Load the data from the CSV file data = pd.read_csv("Parcoursup 2023 - Total.csv", delimiter=";") # Extract the required columns dates = pd.to_datetime(data["Date"], format="%d/%m") # type: ignore en_attente = data[ "Candidats n'ayant pas encore reçu de proposition ou en attente de place" ] # Build the bin edges for the step histogram x = range(len(dates) + 1) y = list(en_attente) + [en_attente.iloc[-1]] # Candidates still waiting # Draw the chart with the curve for waiting candidates (plotted in orange) plt.figure(figsize=(8, 6)) plt.step(x, y, where="post", color="orange", linewidth=2, alpha=0.7) plt.xlabel("Date") plt.ylabel( "Candidats n'ayant pas encore reçu de\nproposition ou en attente de place" ) plt.title( "Évolution en fonction du temps du nombre de\ncandidats sans affectation sur Parcoursup en 2023" ) x_ticks = range(0, len(dates), 3) x_labels = dates[x_ticks].dt.strftime("%d/%m") # type: ignore plt.xticks(x_ticks, x_labels, rotation=45) # Adjust the x-axis limits to remove the blank space on the left plt.xlim(0, len(dates) - 1) plt.ylim(0, 400000) plt.twinx() plt.ylim(0, (400000 / 827271) * 100) plt.ylabel("Pourcentage en fonction du nombre de candidats") plt.tight_layout() plt.savefig( "Évolution en fonction du temps du nombre de candidats sans affectation sur Parcoursup en 2023.svg" ) print( "Le graphique a été enregistré dans le fichier 'Évolution en fonction du temps du nombre de candidats sans affectation sur Parcoursup en 2023.svg'." ) plt.close() if __name__ == "__main__": main()
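# --- Illustrative aside (not from the original script): the twin y-axis above
# maps absolute counts to percentages by rescaling the axis limit against the
# 827,271 total candidates; the same conversion in isolation:
TOTAL_CANDIDATES = 827271

def count_to_percent(count, total=TOTAL_CANDIDATES):
    return 100.0 * count / total

assert round(count_to_percent(400000), 1) == 48.4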
Ahhj93/Indicateur-Parcoursup-2023
parcoursup_candidats_en_attente.py
parcoursup_candidats_en_attente.py
py
1,820
python
fr
code
2
github-code
6
36960795245
from Player import Player # Asks user to input number of players, and creates as many player objects def initialisePlayers(players): try: noOfPlayers = int(input("How many players are there? ")) print() if noOfPlayers < 2: print("That's too few players! Please enter a number between 2-6.") initialisePlayers(players) elif noOfPlayers > 6: print("That's too many players! Please enter a number between 2-6.") initialisePlayers(players) else: # Creating player objects until user input, number of players, is reached # Player hand set to null until cards are dealt for n in range(1, noOfPlayers + 1): p = Player(n, None) players.append(p) except ValueError: print("That's not a number! Please enter a number between 2-6.") initialisePlayers(players) # Displays active player's hand, and determines playable cards def displayHand(p, upcard): # Temporary list to store playable cards playableCards = [] print("Your cards are:") for c in p.hand: print(str(c.rank) + " of " + c.suit) # If a card matches the upcard in rank or suit, it is a playable card if (c.rank == upcard.rank or c.suit == upcard.suit): playableCards.append(c) return playableCards
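# --- Illustrative aside (not from the original module): initialisePlayers()
# retries invalid input by calling itself; a while-loop keeps the prompt
# logic flat and avoids growing the call stack on repeated bad input.
# A sketch of the same validation (hypothetical helper):
def ask_player_count(prompt="How many players are there? ", lo=2, hi=6):
    while True:
        try:
            n = int(input(prompt))
        except ValueError:
            print("That's not a number! Please enter a number between 2-6.")
            continue
        if lo <= n <= hi:
            return n
        print("Please enter a number between 2-6.")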
mhourican01/jack-change-it
PlayerManager.py
PlayerManager.py
py
1,380
python
en
code
0
github-code
6
34836265477
from django.contrib.contenttypes.models import ContentType from django_filters import rest_framework as filters from music_app.models import Artist, Track, Album from music_app.apps import MusicAppConfig _content_types_id = { 'artist': ContentType.objects.get(app_label=MusicAppConfig.name, model='artist').id, 'album': ContentType.objects.get(app_label=MusicAppConfig.name, model='album').id, 'track': ContentType.objects.get(app_label=MusicAppConfig.name, model='track').id, } class BaseSpotifyFilterSet(filters.FilterSet): name = filters.CharFilter( field_name='name', lookup_expr='icontains', label='Name', ) spotify_id = filters.CharFilter( field_name='spotify_id', lookup_expr='exact', label='Spotify ID', ) spotify_uri = filters.CharFilter( field_name='spotify_uri', lookup_expr='exact', label='Spotify URI', ) class ArtistFilterSet(BaseSpotifyFilterSet): class Meta: model = Artist fields = [] class TrackFilterSet(BaseSpotifyFilterSet): class Meta: model = Track fields = [] class AlbumFilterSet(BaseSpotifyFilterSet): class Meta: model = Album fields = [] class CommentFilterSet(filters.FilterSet): creator = filters.NumberFilter( field_name='creator_id', lookup_expr='exact', label='Creator ID', ) content_type = filters.ChoiceFilter( choices=[ (_content_types_id['artist'], 'artist'), (_content_types_id['album'], 'album'), (_content_types_id['track'], 'track') ], field_name='content_type', lookup_expr='exact', label='Type of model' )
vladyslavtsurkan/django_music_application
music_app/api/filters.py
filters.py
py
1,745
python
en
code
0
github-code
6
40005326365
import pytest from xdlang.structures import XDType, ast from xdlang.visitors.parser import parse_text, transform_parse_tree def parse_and_transform_expr(program_text: str): parsed = parse_text(program_text, start="expr") node = transform_parse_tree(parse_tree=parsed) return node @pytest.mark.parametrize( "text,type,value", [ ("42", XDType.INT, 42), ("17.00", XDType.FLOAT, 17.0), ("'Q'", XDType.CHAR, "Q"), ("false", XDType.BOOL, False), ("true", XDType.BOOL, True), ], ) def test_literal(text, type, value): node: ast.LiteralNode = parse_and_transform_expr(text) assert isinstance(node, ast.LiteralNode) assert node.type == type assert node.value == value
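# --- Illustrative aside (not from the original tests): parse_and_transform_expr
# runs a two-stage pipeline, text -> parse tree -> typed AST node. The same
# shape with plain stand-in functions and a toy literal type:
from collections import namedtuple

ToyLiteral = namedtuple("ToyLiteral", "type value")

def toy_parse(text):        # stand-in for parse_text(...)
    return text.strip()

def toy_transform(token):   # stand-in for transform_parse_tree(...)
    return ToyLiteral("INT", int(token)) if token.isdigit() else ToyLiteral("CHAR", token)

toy_node = toy_transform(toy_parse(" 42 "))
assert (toy_node.type, toy_node.value) == ("INT", 42)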
mbednarski/xdlang
tests/ast/test_ast_literal.py
test_ast_literal.py
py
742
python
en
code
3
github-code
6
36358232508
import hide headers = hide.headers TOKEN = hide.TOKEN tell_token = hide.tell_token chat_id = hide.chat_id import http.client import mimetypes import ssl import json import time from time import localtime, strftime from datetime import datetime import requests # macOS has some issue with SSL; this fixes it try: _create_unverified_https_context = ssl._create_unverified_context except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default pass else: # Handle target environment that doesn't support HTTPS verification ssl._create_default_https_context = _create_unverified_https_context def send_to_telegram(message): url = 'https://api.telegram.org/bot'+tell_token+'/sendMessage' for i in chat_id: data = {'chat_id': i, 'text': message} try: requests.post(url, data).json() print("Message sent to Telegram") except: print("message was not sent") print("start") send_to_telegram("Start - The code has started") time_for_active_6 = 0 time_for_active_7 = 0 while True: try: conn = http.client.HTTPSConnection("apiv4.olarm.co") payload = '' conn.request("GET", "/api/v4/devices/2731071d-a487-44e8-bb29-fb9a189f6e72/events?pageLength=40", payload, headers) res = conn.getresponse() data = res.read() my_json = data.decode('utf8').replace("'", '"') jdata = json.loads(my_json) for i in jdata['data'][::-1]: if i['eventNum'] == 6 and i['eventState'] == 'active': time_for_active_6 = i['eventTime'] if i['eventNum'] == 7 and i['eventState'] == 'active': time_for_active_7 = i['eventTime'] if i['eventNum'] == 6 and i['eventState'] == 'closed': time_for_active_6 = 0 if i['eventNum'] == 7 and i['eventState'] == 'closed': time_for_active_7 = 0 # Function to find the difference between two dates. def time_between(d1, d2): d1 = datetime.strptime(d1, "%Y-%m-%d %H:%M:%S") d2 = datetime.strptime(d2, "%Y-%m-%d %H:%M:%S") #return d2-d1 return abs((d2 - d1).seconds/60) if time_for_active_6 != 0: time_from_the_garage = (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time_for_active_6/1000))) current = ( strftime("%Y-%m-%d %H:%M:%S", localtime() ) ) elapsed = time_between(time_from_the_garage, current) if elapsed > 10: print("The garage (orange car) has been open for longer than ",elapsed , "mins" ) send_to_telegram("The door (orange car) has been open for " + str(elapsed) + " mins") time.sleep(120) if time_for_active_7 != 0: time_from_the_garage = (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time_for_active_7/1000))) current = ( strftime("%Y-%m-%d %H:%M:%S", localtime() ) ) elapsed = time_between(time_from_the_garage, current) if elapsed > 10: print("The garage (blue car) has been open for longer than ",elapsed , "mins" ) send_to_telegram("The door (blue car) has been open for " + str(elapsed) + " mins") time.sleep(120) # finally we wait for 2 mins and then we do the whole process again time.sleep(120) except: print("An error occurred, try again") try: send_to_telegram("ERROR - Something went wrong - check the terminal") except: print("telegram is not working") time.sleep(10)
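# --- Illustrative aside (not from the original script): time_between() above
# relies on (d2 - d1).seconds, which silently discards whole days (a door
# open for 24h01m would report 1.0 minute). total_seconds() avoids that;
# a corrected helper with the same string inputs:
from datetime import datetime

def minutes_between(d1, d2, fmt="%Y-%m-%d %H:%M:%S"):
    a = datetime.strptime(d1, fmt)
    b = datetime.strptime(d2, fmt)
    return abs((b - a).total_seconds()) / 60

assert minutes_between("2023-01-01 00:00:00", "2023-01-02 00:01:00") == 1441.0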
tomashege/Olarm_zone_check
check_zone.py
check_zone.py
py
3,684
python
en
code
0
github-code
6
19243529886
import shlex import django_filters from django.core.exceptions import FieldError from django.db.models import Q # The function and Classes in this file are from https://github.com/nexB/scancode.io/blob/main/scanpipe/filters.py def parse_query_string_to_lookups(query_string, default_lookup_expr, default_field): """Parse a query string and convert it into queryset lookups using Q objects.""" lookups = Q() terms = shlex.split(query_string) lookup_types = { "=": "iexact", "^": "istartswith", "$": "iendswith", "~": "icontains", ">": "gt", "<": "lt", } for term in terms: lookup_expr = default_lookup_expr negated = False if ":" in term: field_name, search_value = term.split(":", maxsplit=1) if field_name.endswith(tuple(lookup_types.keys())): lookup_symbol = field_name[-1] lookup_expr = lookup_types.get(lookup_symbol) field_name = field_name[:-1] if field_name.startswith("-"): field_name = field_name[1:] negated = True else: search_value = term field_name = default_field lookups &= Q(**{f"{field_name}__{lookup_expr}": search_value}, _negated=negated) return lookups class QuerySearchFilter(django_filters.CharFilter): """Add support for complex query syntax in search filter.""" def filter(self, qs, value): if not value: return qs lookups = parse_query_string_to_lookups( query_string=value, default_lookup_expr=self.lookup_expr, default_field=self.field_name, ) try: return qs.filter(lookups) except FieldError: return qs.none() class PackageSearchFilter(QuerySearchFilter): def filter(self, qs, value): if not value: return qs if value.startswith("pkg:"): return qs.for_package_url(value) if "://" not in value and ":" in value: return super().filter(qs, value) search_fields = ["type", "namespace", "name", "version", "download_url"] lookups = Q() for field_names in search_fields: lookups |= Q(**{f"{field_names}__{self.lookup_expr}": value}) return qs.filter(lookups)
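# --- Illustrative aside (not from the original module): the query grammar
# above splits terms with shlex and peels a trailing operator symbol off the
# field name. The parsing step in isolation, with plain tuples standing in
# for django Q objects (negation handling omitted):
import shlex

LOOKUPS = {"=": "iexact", "^": "istartswith", "$": "iendswith",
           "~": "icontains", ">": "gt", "<": "lt"}

def parse_terms(query, default_expr="icontains", default_field="name"):
    parsed = []
    for term in shlex.split(query):
        field, expr, value = default_field, default_expr, term
        if ":" in term:
            field, value = term.split(":", maxsplit=1)
            if field and field[-1] in LOOKUPS:
                expr = LOOKUPS[field[-1]]
                field = field[:-1]
        parsed.append((field, expr, value))
    return parsed

assert parse_terms('version>:2 "two words"') == [
    ("version", "gt", "2"), ("name", "icontains", "two words")]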
nexB/purldb
packagedb/filters.py
filters.py
py
2,388
python
en
code
23
github-code
6
15821882121
#!/usr/bin/env python import rospy from std_msgs.msg import Bool from audio_common_msgs.msg import AudioData import os import argparse import pyaudio import wave import datetime class AudioCapture: def __init__( self, is_record_topic, audio_data_topic, num_channels, sample_rate, chunk_size, format_size, file_name_prefix='', out_file_directory='audio', ): rospy.init_node('audio_recorder', anonymous=True) self._is_record_subscriber = rospy.Subscriber(is_record_topic, Bool, self._record_callback, queue_size=1) self._audio_data_topic = rospy.Subscriber(audio_data_topic, AudioData, self._audio_data_callback, queue_size=1) self._num_channels = num_channels self._sample_rate = sample_rate self._chunk_size = chunk_size self._format_size = format_size self._file_name_prefix = file_name_prefix if len(self._file_name_prefix) > 0: self._file_name_prefix += '_' rospy.loginfo(self._file_name_prefix) self._out_directory = out_file_directory self._start_record_datetime = None self._audio_data = None def _record_callback(self, data): is_record = data.data self._record(is_record) def _record(self, is_record): if is_record: if self._is_recording(): rospy.logerr("Already recording audio") else: rospy.loginfo("Starting to record audio") self._audio_data = [] self._start_record_datetime = datetime.datetime.now() else: if self._is_recording(): rospy.loginfo("Stopped recording audio") self._save_recording(self._audio_data) # Clean up self._audio_data = None self._start_record_datetime = None else: rospy.logerr("No recording in progress") def _audio_data_callback(self, data): if self._is_recording(): self._audio_data.append(data.data) def _is_recording(self): return self._start_record_datetime is not None def _save_recording(self, audio_data): if not os.path.exists(self._out_directory): os.makedirs(self._out_directory) file_name = "{prefix}{date_str}.{ext}".format( prefix=self._file_name_prefix, date_str=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), ext='wav' ) file_path = os.path.join(self._out_directory, file_name) wf = wave.open(file_path, 'wb') wf.setnchannels(self._num_channels) wf.setsampwidth(pyaudio.PyAudio().get_sample_size(self._format_size)) wf.setframerate(self._sample_rate) wf.setnframes(self._chunk_size) wf.writeframes(b''.join(audio_data)) wf.close() if __name__ == "__main__": # Getting the instance_id for the parameters parser = argparse.ArgumentParser(description='instance_id for audio recording') parser.add_argument('--instance_id', help='instance_id for parameters namespace', default="1") args, _ = parser.parse_known_args() # Getting the values as params is_record_topic = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/is_record_topic", "audio_capture/is_record") audio_topic = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/audio_topic", "audio/audio") output_directory = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/output_directory", "/root/audio") num_channels = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/num_channels", 1) sample_rate = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/sample_rate", 16000) chunk_size = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/chunk_size", 1024) format_type = rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/format_type", "wave") format_size = eval(rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/format_size", "pyaudio.paInt16")) file_name_prefix = 
rospy.get_param("/data_capture/"+args.instance_id+"/audio_capture/file_name_prefix", '') assert format_type == "wave" AudioCapture( is_record_topic=is_record_topic, audio_data_topic=audio_topic, out_file_directory=output_directory, num_channels=num_channels, sample_rate=sample_rate, chunk_size=chunk_size, format_size=format_size, file_name_prefix=file_name_prefix ) rospy.spin()
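# --- Illustrative aside (not from the original node): _save_recording()
# concatenates the raw chunks and writes them with the wave module. The same
# WAV-writing core in isolation, with one second of silence standing in for
# captured audio (all parameter values below are hypothetical):
import wave

def write_wav(path, chunks, channels=1, sample_width=2, rate=16000):
    wf = wave.open(path, 'wb')
    wf.setnchannels(channels)      # mono
    wf.setsampwidth(sample_width)  # bytes per sample (paInt16 -> 2)
    wf.setframerate(rate)
    wf.writeframes(b''.join(chunks))
    wf.close()

write_wav('silence.wav', [b'\x00\x00' * 16000])  # 1 s of int16 silence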
robotpt/ros-data-capture
src/data_capture/audio_capture2/scripts/capture.py
capture.py
py
4,708
python
en
code
0
github-code
6
43242321794
# Same as second example, but using F1 (ALM) import casadi.casadi as cs import opengen as og import json nu = 3 np = 1 u = cs.SX.sym("u", nu) p = cs.SX.sym("p", np) f = cs.dot(u, u) for i in range(nu): f += p * u[i] F1 = cs.sin(u[0]) - 0.3 C = og.constraints.Zero() U = og.constraints.Ball2(None, 0.5) problem = og.builder.Problem(u, p, f) \ .with_constraints(U) \ .with_aug_lagrangian_constraints(F1, C) meta = og.config.OptimizerMeta() \ .with_version("0.0.0") \ .with_authors(["Shane Trimble"]) \ .with_licence("CC4.0-By") \ .with_optimizer_name("shane") build_config = og.config.BuildConfiguration() \ .with_build_directory("python_build") \ .with_build_mode("debug") \ .with_tcp_interface_config() solver_config = og.config.SolverConfiguration() \ .with_tolerance(1e-5) \ .with_initial_tolerance(1e-5) \ .with_initial_penalty(10) \ .with_penalty_weight_update_factor(2) \ .with_max_outer_iterations(20) builder = og.builder.OpEnOptimizerBuilder(problem, metadata=meta, build_configuration=build_config, solver_configuration=solver_config) builder.build() mng = og.tcp.OptimizerTcpManager('python_build/shane') mng.start() pong = mng.ping() # check if the server is alive print(pong) solution = mng.call([1.0]) # call the solver over TCP print(json.dumps(solution, indent=4, sort_keys=False)) mng.kill()
BjoernLindqvist/Crazyflie_NMPC
third_example.py
third_example.py
py
1,718
python
en
code
0
github-code
6
27757527715
import math import torch import torch.nn as nn from torch.nn.parameter import Parameter import util as u def reset_param(t): stdv = 2. / math.sqrt(t.size(0)) t.data.uniform_(-stdv,stdv) class GCN_LSTM(nn.Module): def __init__(self, args, activation, device='cpu'): super().__init__() self.lstm=nn.LSTM( input_size=args.layer_2_feats, hidden_size=args.lstm_feats, num_layers=args.num_lstm_layers ) # self.lstm = nn.GRU( # input_size=args.layer_2_feats, # hidden_size=args.lstm_l2_feats, # num_layers=args.lstm_l2_layers # ) self.device=device self.activation=activation self.num_layers=args.num_gcn_layers self.choose_top_k=TopK(args.layer_2_feats, args.k) self.w_list=nn.ParameterList() for i in range(self.num_layers): if i==0: w_i=Parameter(torch.Tensor(args.feats_per_node, args.layer_1_feats)) reset_param(w_i) else: w_i=Parameter(torch.Tensor(args.layer_1_feats, args.layer_2_feats)) reset_param(w_i) self.w_list.append(w_i) def forward(self, A_list, node_feats, mask_list): last_l_seq=[] for t, Ahat in enumerate(A_list): idx=mask_list[t] Ahat, x=Ahat.to(self.device), node_feats.to(self.device) x=x.matmul(self.w_list[0]) x[idx]=self.activation(Ahat.matmul(x[idx])) for i in range(1, self.num_layers): x=x.matmul(self.w_list[i]) x[idx]=self.activation(Ahat.matmul(x[idx])) last_l_seq.append(x) last_l_seq=torch.stack(last_l_seq) out, _=self.lstm(last_l_seq, None) return out[-1] class TopK(torch.nn.Module): def __init__(self,feats,k): super().__init__() self.scorer = Parameter(torch.Tensor(feats,1)) self.reset_param(self.scorer) self.k = k def reset_param(self,t): #Initialize based on the number of rows stdv = 1. / math.sqrt(t.size(0)) t.data.uniform_(-stdv,stdv) def forward(self,node_embs): scores = node_embs.matmul(self.scorer) / self.scorer.norm() ll=node_embs.shape[0] tanh = torch.nn.Tanh() out=node_embs * tanh(scores.view(-1,1)) if ll<self.k: t=node_embs[-1] * tanh(scores[-1]) t=t.unsqueeze(0).repeat(self.k-ll,1) out =torch.cat([out, t], 0) out=out[:self.k] #we need to transpose the output return out
sunny77889/DyGCN
compare_models/GCN_LSTM/gcn_lstm.py
gcn_lstm.py
py
2,679
python
en
code
3
github-code
6
32028489205
# Test
#
# _nombreVariable = "banana-😸"
#
# print(_nombreVariable)

# Single-line comment
#
# """
# Ducks
# Everyone
#
# cheese
# ...
# """

"""Test 01"""
# cars = ["Ford","Volvo","BMW"]
#
# for x in cars:
#     print(x)
#

"""Test 02"""
# cars = ["Ford","Volvo","BMW"]
# cars.append("Cigars😨")
# for x in cars:
#     print(x)
#
# cars = ["Ford","Volvo","BMW"]
# cars.append("Let's go")
# print(cars)
# cars.pop()
# print(cars)
# cars.remove("Ford")
# print(cars)
# print(len(cars))

fruits = ["apple", "banana", "cherry", "kiwi", "mango"]
# newlist = [x for x in fruits if "a" in x]
# print(newlist)

"""Functions"""
# x = "Awesome"
#
# def myfunc():
#     global x
#     x = "fantastic"
#     print("python is " + x)
#     pass
# myfunc()
#
# print("Python is "+x)

"""If statements"""
#
# x=41
#
# if x > 10:
#     print("Greater than 10")
#     if x > 20:
#         print("greater than 20")
#     else:
#         print("Nothing")
# else:
#     print("Less than 41")

"""TryExcept"""
# try:
#     print("Hello World")
# except NameError:
#     print("There was an error")
# else:
#     print("Finished😈")

"""Switch"""
# variable = "😲"
# if variable == "Flute":
#     print("Expected result")
# elif variable == "":
#     print("Jerks")
# elif variable == "😲":
#     print("Silly, that's an emoji")
# else:
#     print("NOTHING")

"""Loops"""
# i=1
# while i<=6:
#     print(i)
#     i += 1
# else:
#     print("DONE")

# i = 0
#
# while i < 10:
#
#     i += 1
#     if i == 5:
#         break
#     print(i)

# print(fruits)
# for x in fruits:
#     print(x)
#     if x == "banana":
#         break
#

"""SPECIALS"""

"""S - Interpolation"""
# name = input("Enter name")
# print(f'God asss {name}')

"""S - Ternary operator"""
# a = 100
# b = 200
# print(a) if a > b else print(b) if a < b else print("Jerks")

# _user = False
# _msg = 'User logged in' if _user else 'Stupid user'
# print(_msg)

"""S - Recursion"""
def factorial(n):
    if n == 1:
        return 1
    else:
        return n * factorial(n - 1)

z = factorial(5)
#
#
# print(z)

"""S - Spread"""

"""PYTHON FUNCTIONS"""
x = min(1, 2, 34, 5)
print(x)
Andreius-14/Notas_Mini
3.Python/1.sintaxis-py.py
1.sintaxis-py.py
py
2,111
python
en
code
0
github-code
6
13351154708
# -*- coding: utf-8 -*-

# electrical calculator
import math
import cmath
import numpy as np
import matplotlib.pyplot as plot
from matplotlib.offsetbox import AnchoredText


# three phase power calculations
def singlePhaseLoad(powerConsumed, powerFactor, leadLag):
    # powerConsumed in kW
    # power factor
    # leadLag = 0 - lead, 1 - lag

    # get the angle from the pf
    angle = math.acos(powerFactor)

    # change angle depending on leading or lagging pf
    if leadLag == 1:
        angle = 0 - angle

    # calculate the apparent power
    # S*pf = P
    apparentPower = powerConsumed / powerFactor

    # get reactive power
    reactivePower = apparentPower * math.sin(angle)

    # display values (note: .format() belongs to the string, not to the
    # return value of print(); the original called print(...).format(...),
    # which raises AttributeError on Python 3)
    print("Angle is: {} rad / {} degrees".format(angle, math.degrees(angle)))
    print("Apparent Power: {} VA".format(apparentPower))
    print("Reactive Power is: {} VAR".format(reactivePower))


def threePhaseLoad(powerConsumed, powerFactor, leadLag, voltagel2l):
    """
    Parameters:
        powerConsumed - power consumed by the 3-phase load
        powerFactor - pf
        leadLag - leading pf - 0, lagging pf - 1
        voltagel2l - line to line voltage across the 3-phase load
    """
    # get the angle from the pf
    angle = math.acos(powerFactor)

    # change angle depending on leading or lagging pf
    if leadLag == 1:
        angle = 0 - angle

    # calculate the apparent power
    # S*pf = P
    apparentPower = powerConsumed / powerFactor

    # get reactive power
    reactivePower = apparentPower * math.sin(angle)

    # get the line to line current
    currentl2l = apparentPower / (math.sqrt(3) * voltagel2l)

    # display values
    print("Angle is: {} rad / {} degrees".format(angle, math.degrees(angle)))
    print("Apparent Power: {} VA".format(apparentPower))
    print("Reactive Power is: {} VAR".format(reactivePower))
    print("l2l Current is: {} A".format(currentl2l))


def polarRec(mod, angleDeg):
    """
    Converts from polar to rectangular coordinates
    return complexNumber
    """
    return cmath.rect(mod, math.radians(angleDeg))


def recPolar(complexNumber):
    """
    Converts from rectangular to polar coordinates.
    return (mod, angle in deg)
    """
    ans = cmath.polar(complexNumber)
    return (ans[0], math.degrees(ans[1]))


def paraImp(number1, number2):
    """
    Calculates the total impedance of 2 impedances in parallel
    """
    return (number1 * number2) / (number1 + number2)


def impBaseConv(voltageRating, powerRating, voltageBase, powerBase, impedenceValue):
    """
    Parameters of equipment are given using the power rating of the equipment
    as the MVA base. This function converts Z values from the old rating to
    the new rating.

    voltageRating - voltage rating of the equipment
    powerRating - power rating of the equipment
    powerBase - the power base of where the equipment is being used
    voltageBase - the voltage base of where the equipment is being used
    impedenceValue - the value to be converted between bases

    The formula is Zbase = Vbase**2 / Sbase
    """
    return impedenceValue * ((voltageRating**2 / powerRating) / (voltageBase**2 / powerBase))


def smibTransCalc(Egen, Vpoc, Xeq, EgenPost, VpocPost, XeqPost):
    # pre-fault curve
    delta = np.arange(0, 3.14, 0.1)
    Pe = (abs(Egen) * abs(Vpoc) * np.sin(delta)) / abs(Xeq)

    # post-fault curve
    Pepost = (abs(EgenPost) * abs(VpocPost) * np.sin(delta)) / abs(XeqPost)

    f, ax = plot.subplots(1, 1)
    ax.plot(delta, Pe)  # pre-fault
    ax.plot(delta, Pepost)  # post-fault
    ax.plot(delta, [abs(Vpoc) for i in delta])  # mechanical power input
    plot.title("Power Curve")
    plot.xlabel("Power Angle Delta (rad)")
    plot.ylabel("Power (p.u or W)")
    plot.grid(True, which='both')
    anchored_text = AnchoredText("Pe = {}sin(del)".format((abs(Egen) * abs(Vpoc)) / abs(Xeq)), loc=2)
    ax.add_artist(anchored_text)

    d0 = math.asin(Vpoc / ((abs(Egen) * abs(Vpoc)) / abs(Xeq)))
    d1 = math.asin(Vpoc / ((abs(EgenPost) * abs(VpocPost)) / abs(XeqPost)))
    print(d0)
    print(d1)
    plot.show()
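

# Illustrative usage sketch (the numbers below are made up, not from the
# original script). powerConsumed is passed in watts here so that the
# I = S / (sqrt(3) * V) step yields amperes; the source's "kW" comment and
# its printed VA/A units are otherwise inconsistent.
if __name__ == '__main__':
    threePhaseLoad(10e3, 0.8, 1, 400)          # 10 kW, 0.8 lagging, 400 V l2l
    print(recPolar(polarRec(10, 30)))          # round trip: ~(10.0, 30.0)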
vdatl5/electricalCalculator
elecCalc.py
elecCalc.py
py
4,282
python
en
code
0
github-code
6
6905807226
NUM_ROWS = 5 NUM_COLS = 9 # construct a matrix my_matrix = {} for row in range(NUM_ROWS): row_dict = {} for col in range(NUM_COLS): row_dict[col] = row * col my_matrix[row] = row_dict # print(my_matrix) d_frmt = '{:<4} {:<4}' for k,v in my_matrix.items(): print("{", k, "}", "\t", v) frmt = '{:<4}' # print the matrix for row in range(NUM_ROWS): for col in range(NUM_COLS): print(frmt.format(my_matrix[row][col]), end='') print()
hqpiotr/learning-python
2. Python - Rice/c3-dataAnalysis/week1/ex.py
ex.py
py
472
python
en
code
0
github-code
6
23837831992
'''
Script for building and visualizing the v(x) function for a fixed x0 and gamma
(oscillating case of the eigenfunction)
'''

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

import utils


x0 = 0.41
gamma = 6.0
AFTER_TANGENT = False

SUFFIX_NAME = '_after_tangent' if AFTER_TANGENT else ''
CSV_FILE = f'../Tracer/Results/x0={x0:.2f}/x0={x0:.2f}_analytical' + SUFFIX_NAME + '.csv'


def get_omega(g):
    '''
    Return omega(gamma) value
    :param g: gamma value
    :return: omega(gamma) value
    '''
    df = pd.read_csv(CSV_FILE, sep=';')
    return df.loc[(df['gamma'] < g+utils.EPS) & (df['gamma'] > g-utils.EPS)]['w'].values[0]


def get_v_func_val(g, w, x):
    '''
    Calculate and return the value of v(x) for fixed gamma
    :param g: fixed gamma value
    :param w: fixed omega value
    :param x: x value
    :return: v(x) for fixed gamma
    '''
    if g <= 0:
        mu = np.sqrt(-g + w*1.0j)
        return np.cosh(mu*x)
    else:
        mu = np.sqrt(g + w*1.0j)
        return np.cos(mu*x)


def draw_v_func_components(xs, vs):
    '''
    Draw 2 graphics: Re(v(x)) dependency and Im(v(x)) dependency
    :param xs: x values
    :param vs: v(x) values
    '''
    f = plt.figure(figsize=(10, 4))
    f.canvas.set_window_title('x0={:.2}__g={:.4}'.format(x0, gamma))
    f.subplots_adjust(left=0.07, bottom=0.1, right=0.97, top=0.97, hspace=0.5)
    ax1 = f.add_subplot(121)
    ax2 = f.add_subplot(122)
    ax1.set_xlabel('x')
    ax1.set_ylabel('Re v', rotation='horizontal', position=(0.0, 0.53))
    ax1.grid()
    ys = [v.real for v in vs]
    ax1.plot(xs, ys, color='seagreen', linewidth=2, zorder=3)
    ax1.axhline(y=0.0, linewidth=2, color='grey', zorder=2)
    ax1.axvline(x=0.0, linewidth=2, color='grey', zorder=2)
    ax2.set_xlabel('x')
    ax2.set_ylabel('Im v', rotation='horizontal', position=(0.0, 0.53))
    ax2.grid()
    ys = [v.imag for v in vs]
    ax2.plot(xs, ys, color='peru', linewidth=2, zorder=3)
    ax2.axhline(y=0.0, linewidth=2, color='grey', zorder=2)
    ax2.axvline(x=0.0, linewidth=2, color='grey', zorder=2)
    plt.show()


if __name__ == '__main__':
    w = get_omega(gamma)
    xs = np.linspace(0, 1, 10000)
    vs = [get_v_func_val(gamma, w, x) for x in xs]
    draw_v_func_components(xs, vs)
leonel11/KaschenkoEquation
Scripts/oscillating_draw_v_function.py
oscillating_draw_v_function.py
py
2,269
python
en
code
0
github-code
6
73814974266
import mimetypes import os from bonobo.nodes import ( CsvReader, CsvWriter, FileReader, FileWriter, JsonReader, JsonWriter, PickleReader, PickleWriter ) FILETYPE_CSV = "text/csv" FILETYPE_JSON = "application/json" FILETYPE_PICKLE = "pickle" FILETYPE_PLAIN = "text/plain" READER = "reader" WRITER = "writer" class Registry: ALIASES = { "csv": FILETYPE_CSV, "json": FILETYPE_JSON, "pickle": FILETYPE_PICKLE, "plain": FILETYPE_PLAIN, "text": FILETYPE_PLAIN, "txt": FILETYPE_PLAIN, } FACTORIES = { READER: { FILETYPE_JSON: JsonReader, FILETYPE_CSV: CsvReader, FILETYPE_PICKLE: PickleReader, FILETYPE_PLAIN: FileReader, }, WRITER: { FILETYPE_JSON: JsonWriter, FILETYPE_CSV: CsvWriter, FILETYPE_PICKLE: PickleWriter, FILETYPE_PLAIN: FileWriter, }, } def get_factory_for(self, kind, name, *, format=None): if not kind in self.FACTORIES: raise KeyError("Unknown factory kind {!r}.".format(kind)) if format is None and name is None: raise RuntimeError("Cannot guess factory without at least a filename or a format.") # Guess mimetype if possible if format is None: format = mimetypes.guess_type(name)[0] # Guess from extension if possible if format is None: _, _ext = os.path.splitext(name) if _ext: format = _ext[1:] # Apply aliases if format in self.ALIASES: format = self.ALIASES[format] if format is None or not format in self.FACTORIES[kind]: raise RuntimeError( "Could not resolve {kind} factory for {name} ({format}).".format(kind=kind, name=name, format=format) ) return self.FACTORIES[kind][format] def get_reader_factory_for(self, name, *, format=None): """ Returns a callable to build a reader for the provided filename, eventually forcing a format. :param name: filename :param format: format :return: type """ return self.get_factory_for(READER, name, format=format) def get_writer_factory_for(self, name, *, format=None): """ Returns a callable to build a writer for the provided filename, eventually forcing a format. :param name: filename :param format: format :return: type """ return self.get_factory_for(WRITER, name, format=format) default_registry = Registry() def create_reader(name, *args, format=None, registry=default_registry, **kwargs): """ Create a reader instance, guessing its factory using filename (and eventually format). :param name: :param args: :param format: :param registry: :param kwargs: :return: mixed """ return registry.get_reader_factory_for(name, format=format)(name, *args, **kwargs) def create_writer(name, *args, format=None, registry=default_registry, **kwargs): """ Create a writer instance, guessing its factory using filename (and eventually format). :param name: :param args: :param format: :param registry: :param kwargs: :return: mixed """ return registry.get_writer_factory_for(name, format=format)(name, *args, **kwargs)
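

# A minimal usage sketch appended for illustration; "people.csv" is a
# hypothetical filename, and the lookup relies only on the registry defined
# above (".csv" resolves to the CsvReader factory via mimetype/extension).
if __name__ == "__main__":
    factory = default_registry.get_reader_factory_for("people.csv")
    print(factory)  # expected: the CsvReader class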
python-bonobo/bonobo
bonobo/registry.py
registry.py
py
3,404
python
en
code
1,564
github-code
6
21569780010
import rospy import actionlib from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal from prl_pinocchio.tools.configurations import ConfigurationConvertor class Commander: """ This class is in charge of the control of the actual robot. """ def __init__(self, robot, jointsName, trajectory_action_name=None, fwd_topic_name=None, speedScaling = 1.0, accScaling = 1.0): """ Parameters ---------- robot (Robot): Robot class to control. (see robot.py). jointsName (str[]): Name of the joints to actually command. trajectory_action_name (str): Name of the ros action server for controlling the robot with trajectories. fwd_topic_name (str): Name of the ros topic for controlling the robot directly. Optionnals parameters: ---------------------- speedScaling (float): Ratio to control the execution of path (relative to the robot maximum speed). accScaling (float): Ratio to control the execution of path (relative to the robot maximum acceleration). """ self.jointsName = jointsName self.robot = robot # Convert configuration from pin to ros self.converter = ConfigurationConvertor(self.robot.pin_robot_wrapper.model, self.jointsName) self._trajectory_action_name = trajectory_action_name self._fwd_topic_name = fwd_topic_name # Create action client to send the commands self._traj_action_client = None self._fwd_pub_topic = None def start_trajectory(self): """ Start the commander for trajectory control. This requires to have initialized the ROS node already. """ # Check if the action client is already created if self._traj_action_client is None: if self._trajectory_action_name is not None: self._traj_action_client = actionlib.SimpleActionClient(self._trajectory_action_name, FollowJointTrajectoryAction) rospy.loginfo("Waiting for action server " + self._trajectory_action_name) self._traj_action_client.wait_for_server(timeout=rospy.Duration.from_sec(10.0)) else: rospy.logwarn("No action server name provided. start_trajectory() is skipped.") def start_fwd(self): """ Start the commander for fwd control. This requires to have initialized the ROS node already. """ from joint_group_ff_controllers.msg import setpoint # Check if the action client is already created if self._fwd_pub_topic is None: if self._fwd_topic_name is not None: self._fwd_pub_topic = rospy.Publisher(self._fwd_topic_name, setpoint, queue_size=1) else: rospy.logwarn("No action server name provided. start_fwd() is skipped.") def execute_path(self, path, dt=1/125., wait=True): """ Execute a path on the robot. Parameters ---------- path (Path): path to execute. Optionnal parameters: --------------------- dt (float): Time step to discretize the path. wait (bool): If True, wait for the execution of the path to finish before returning. Raises ------ AssertionError: If the start configuration of the path differs too much from the actual robot configuration. AssertionError: If the one or more commanded joint from this Commander is not in the path joints. AssertionError: If the action client is not initialized. """ if self._traj_action_client is None: rospy.logerr("Action client not initialized. Did you call start_trajectory() first.") raise AssertionError("Action client not initialized. 
Did you call start_trajectory() first.") for i in range(len(path.jointList)): if(self.robot.pin_robot_wrapper.model.names[i+1] != path.jointList[i]): print(self.robot.pin_robot_wrapper.model.names[i+1], path.jointList[i]) # Assume that the robot is always the first components of the configuraitons def q_hpp_to_pin(q): return q[:self.robot.pin_robot_wrapper.model.nq] def v_hpp_to_pin(v): return v[:self.robot.pin_robot_wrapper.model.nv] # Check that the robot is close to the start configuration q_start = q_hpp_to_pin(path.corbaPath.call(0)[0]) assert self.robot.is_at_config(q_start, 1), "The robot current configuration differs too much from the start configuration of the path" # Create ROS message jointTraj = JointTrajectory(joint_names = self.jointsName) # Make point array t = 0 while t < path.corbaPath.length(): t_ros = rospy.Time.from_sec(t) q = self.converter.q_pin_to_ros(q_hpp_to_pin(path.corbaPath.call(t)[0])) q_dot = self.converter.v_pin_to_ros(v_hpp_to_pin(path.corbaPath.derivative(t, 1))) point = JointTrajectoryPoint(positions = q, velocities = q_dot, time_from_start = t_ros) jointTraj.points.append(point) t += dt # Send trajectory to controller jointTraj.header.stamp = rospy.Time(0) self._traj_action_client.send_goal(FollowJointTrajectoryGoal(trajectory=jointTraj)) # Wait for the path to be fully executed if wait: self._traj_action_client.wait_for_result() def execute_fwd(self, q, v, tau, timeout): """ Execute a velocity on the robot. Parameters ---------- q (float[]): Goal configuration of the robot. v (float[]): Goal velocity of the robot. tau (float[]): Goal torque of the robot. timeout (float): Timeout for the execution of the commands. Raises ------ AssertionError: If the action client is not initialized. """ if self._fwd_pub_topic is None: rospy.logerr("Action client not initialized. Did you call start_fwd() first.") raise AssertionError("Action client not initialized. Did you call start_fwd() first.") # Filter joints q = self.converter.q_pin_to_ros(q) v = self.converter.v_pin_to_ros(v) tau = self.converter.v_pin_to_ros(tau) # Send goal to controller self._fwd_pub_topic.publish(positions = q, velocities = v, efforts = tau, timeout = rospy.Duration(timeout))
inria-paris-robotic-lab/prl_hpp_tsid
prl_pinocchio/src/prl_pinocchio/commander.py
commander.py
py
6,622
python
en
code
0
github-code
6
33952361000
from pyimagesearch.centroidtracker import CentroidTracker from pyimagesearch.trackableobject import TrackableObject from imutils.video import VideoStream from imutils.video import FPS import numpy as np import argparse import imutils import time import dlib import cv2 ap = argparse.ArgumentParser() ap.add_argument("-p", "--prototxt", required=True, help="path to Caffe 'deploy' prototxt file") ap.add_argument("-m", "--model", required=True, help="path to Caffe pre-trained model") ap.add_argument("-i", "--input", type=str, help="path to optional input video file") ap.add_argument("-o", "--output", type=str, help="path to optional output video file") ap.add_argument("-c", "--confidence", type=float, default=0.4, help="minimum probability to filter weak detections") ap.add_argument("-s", "--skip-frames", type=int, default=30, help="# of skip frames between detections") args = vars(ap.parse_args()) CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] print("[INFO] loading model...") net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"]) if not args.get("input", False): print("[INFO] starting video stream...") vs = VideoStream(src=0).start() time.sleep(2.0) else: print("[INFO] opening video file...") vs = cv2.VideoCapture(args["input"]) writer = None W = None H = None ct = CentroidTracker(maxDisappeared=40, maxDistance=50) trackers = [] trackableObjects = {} totalFrames = 0 totalDown = 0 totalUp = 0 fps = FPS().start() while True: frame = vs.read() frame = frame[1] if args.get("input", False) else frame if args["input"] is not None and frame is None: break frame = imutils.resize(frame, width=500) rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) if W is None or H is None: (H, W) = frame.shape[:2] if args["output"] is not None and writer is None: fourcc = cv2.VideoWriter_fourcc(*"MJPG") writer = cv2.VideoWriter(args["output"], fourcc, 30, (W, H), True) status = "Waiting" rects = [] if totalFrames % args["skip_frames"] == 0: status = "Detecting" trackers = [] blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5) net.setInput(blob) detections = net.forward() for i in np.arange(0, detections.shape[2]): confidence = detections[0, 0, i, 2] if confidence > args["confidence"]: idx = int(detections[0, 0, i, 1]) if CLASSES[idx] != "person": continue box = detections[0, 0, i, 3:7] * np.array([W, H, W, H]) (startX, startY, endX, endY) = box.astype("int") tracker = dlib.correlation_tracker() rect = dlib.rectangle(startX, startY, endX, endY) tracker.start_track(rgb, rect) trackers.append(tracker) else: for tracker in trackers: status = "Tracking" tracker.update(rgb) pos = tracker.get_position() startX = int(pos.left()) startY = int(pos.top()) endX = int(pos.right()) endY = int(pos.bottom()) rects.append((startX, startY, endX, endY)) cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2) objects = ct.update(rects) for (objectID, centroid) in objects.items(): to = trackableObjects.get(objectID, None) if to is None: to = TrackableObject(objectID, centroid) else: y = [c[1] for c in to.centroids] direction = centroid[1] - np.mean(y) to.centroids.append(centroid) if not to.counted: if direction < 0 and centroid[1] < H // 2: totalUp += 1 to.counted = True elif direction > 0 and centroid[1] > H // 2: totalDown += 1 to.counted = True trackableObjects[objectID] = to text = "ID {}".format(objectID) cv2.putText(frame, text, (centroid[0] - 
10, centroid[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1) info = [ ("Up", totalUp), ("Down", totalDown), ("Status", status), ] for (i, (k, v)) in enumerate(info): text = "{}: {}".format(k, v) cv2.putText(frame, text, (10, H - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2) cv2.putText(frame, "Crowd Monitor - Store Entry", (109,26), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2) if writer is not None: writer.write(frame) cv2.imshow("Frame", frame) key = cv2.waitKey(1) & 0xFF if key == ord("q"): break totalFrames += 1 fps.update() fps.stop() print("[INFO] elapsed time: {:.2f}".format(fps.elapsed())) print("[INFO] approx. FPS: {:.2f}".format(fps.fps())) if writer is not None: writer.release() if not args.get("input", False): vs.stop() else: vs.release() cv2.destroyAllWindows()
Nem3sisX/piedpiper-socialspace
Inside_Store_Model/run.py
run.py
py
4,748
python
en
code
6
github-code
6
27390985743
# Configuration file for the Sphinx documentation builder. # # For the full list of built-in configuration values, see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html import os import sys sys.path.insert(0, os.path.abspath("../../")) sys.path.insert(0, os.path.abspath(".")) import swiftzoom # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information project = 'SWIFTzoom' copyright = '2023, Edoardo Altamura' author = 'Edoardo Altamura' release = swiftzoom.__version__ # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "recommonmark", "sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.doctest", "sphinx.ext.napoleon", "sphinx.ext.mathjax", "sphinx.ext.autosummary", ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] source_suffix = [".rst", ".md"] master_doc = "index" # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "collapse_navigation": False } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # -- Options for automatic API doc autodoc_member_order = "bysource" autodoc_default_flags = ["members"] autosummary_generate = True # must be outside run_apidoc definition to be set successfully: os.environ["SPHINX_APIDOC_OPTIONS"] = "members,undoc-members,show-inheritance" def run_apidoc(_): try: from sphinx.ext.apidoc import main except ImportError: from sphinx.apidoc import main sys.path.append(os.path.join(os.path.dirname(__file__), "..")) cur_dir = os.path.abspath(os.path.dirname(__file__)) api_doc_dir = os.path.join(cur_dir, "modules") module = os.path.join(cur_dir, "../..", "swiftzoom") ignore = [ os.path.join(cur_dir, "../..", "tests"), os.path.join(cur_dir, "../..", "swiftzoom/metadata"), ] main(["-M", "-f", "-e", "-T", "-d 0", "-o", api_doc_dir, module, *ignore]) def setup(app): app.connect("builder-inited", run_apidoc)
edoaltamura/swiftzoom
docs/source/conf.py
conf.py
py
3,219
python
en
code
0
github-code
6
42672162843
# -*- coding: utf-8 -*-
"""
Created on Apr 7 2021
Modified on May 05 2021

@author: Andres Sandino

Convert "nii" image format to "png" with lung window WW=-500, WL=1500
"""

#%%

import os
import numpy as np
import matplotlib.pyplot as plt
import cv2
import nibabel as nib

# Patient number
patient_no = 1

# Origin path and filename
path = 'C:/Users/Andres/Desktop/CTAnotado/resultados/Dr Alvarado/'
filename = 'maskEstudio1.nii'

# Dest path
destpath = 'C:/Users/Andres/Desktop/CovidImages/Mask/'

# Load image
img = nib.load(path+filename)
img = img.get_fdata()

# Image format
imgformat = '.png'

array = np.asarray(img)

#%%

[width, length, numslices] = np.shape(array)
[m, n, t] = np.shape(array)

# for i in range(numslices):
for i in range(35, 40):

    # List is flipped
    a = numslices - 1 - i
    slide = array[:, :, a]

    # Labeling files
    filename = 'P'+str(patient_no).zfill(4)+'_Im'+str(numslices-a).zfill(4)+'_mask'+imgformat
    print(filename)

    # Rotate the image 90 degrees, then mirror it left-right
    im2 = np.rot90(slide)

    # for i in range(4):
    #     im2 = np.rot90(im2)

    # im3 = im2.copy()
    im3 = np.fliplr(im2)

    norm_img = cv2.normalize(im3, None, alpha=0, beta=255,
                             norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    norm_img = np.uint8(norm_img)

    cv2.imwrite(destpath+filename, norm_img)

    # plt.figure()
    # plt.axis('off')
    # plt.imshow(norm_img, cmap="gray")
    # plt.title('slide'+str(t-a))
andres87sg/LungCT
ConvertImages/get_nii_LungMask.py
get_nii_LungMask.py
py
1,553
python
en
code
1
github-code
6
10501866472
""" Fibonaci Number using Bottom-up Dynamic programming approach """ def fibonacci_num(num): table = {} for k in range(1, num+1): if k<=2: f=1 else: f = table[k-1] + table[k-2] table[k] = f print(table[k]) return table[k] fibonacci_num(100)
anojkr/coding-assignment
dynamic_programming/fibonacci.py
fibonacci.py
py
257
python
en
code
0
github-code
6
27894126033
def gcd(a, b):
    # Euclidean algorithm: repeatedly replace (a, b) with (b, a % b)
    while b > 0:
        a, b = b, a % b
    return a


def lcm(a, b):
    return int(a * b / gcd(a, b))


def solution(n, m):
    answer = []
    # gcd/lcm are symmetric in their arguments, so the original n < m / m < n
    # branches were unnecessary and silently returned [] when n == m.
    answer.append(gcd(n, m))
    answer.append(lcm(n, m))
    return answer


print(solution(3, 12))

'''
lcm = a * b / gcd
The gcd is computed with the Euclidean algorithm.
'''
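
# Worked example: gcd(12, 18) = 6, so lcm(12, 18) = 12 * 18 / 6 = 36.
print(solution(12, 18))  # [6, 36]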
SheepEatLion/Algorithms
num2_progms_1.py
num2_progms_1.py
py
473
python
ko
code
0
github-code
6
22460490121
import os import sys import argparse import time import warnings import torch import torch.nn.functional as F import numpy as np import matplotlib.pyplot as plt sys.path.append(os.path.join(os.getcwd().split('cbo-in-python')[0], 'cbo-in-python')) from src.torch.models import * from src.datasets import load_mnist_dataloaders from src.torch import Optimizer, Loss MODELS = { 'TinyMLP': TinyMLP, 'SmallMLP': SmallMLP, 'LeNet1': LeNet1, 'LeNet5': LeNet5, } DATASETS = { 'MNIST': load_mnist_dataloaders, } def _evaluate(model, X_, y_, loss_fn): with torch.no_grad(): outputs = model(X_) y_pred = torch.argmax(outputs, dim=1) loss = loss_fn(outputs, y_) acc = 1. * y_.eq(y_pred).sum().item() / y_.shape[0] return loss, acc def train(model, train_dataloader, test_dataloader, device, use_multiprocessing, processes, epochs, particles, particles_batch_size, alpha, sigma, l, dt, anisotropic, eps, partial_update, cooling, eval_freq): train_accuracies = [] train_losses = [] test_accuracies = [] test_losses = [] optimizer = Optimizer(model, n_particles=particles, alpha=alpha, sigma=sigma, l=l, dt=dt, anisotropic=anisotropic, eps=eps, partial_update=partial_update, use_multiprocessing=use_multiprocessing, n_processes=processes, particles_batch_size=particles_batch_size, device=device) loss_fn = Loss(F.nll_loss, optimizer) n_batches = len(train_dataloader) for epoch in range(epochs): epoch_train_accuracies = [] epoch_train_losses = [] for batch, (X, y) in enumerate(train_dataloader): X, y = X.to(device), y.to(device) train_loss, train_acc = _evaluate(model, X, y, F.nll_loss) epoch_train_accuracies.append(train_acc) epoch_train_losses.append(train_loss.cpu()) optimizer.zero_grad() loss_fn.backward(X, y, backward_gradients=False) optimizer.step() if batch % eval_freq == 0 or batch == n_batches - 1: with torch.no_grad(): losses = [] accuracies = [] for X_test, y_test in test_dataloader: X_test, y_test = X_test.to(device), y_test.to(device) loss, acc = _evaluate(model, X_test, y_test, F.nll_loss) losses.append(loss.cpu()) accuracies.append(acc) val_loss, val_acc = np.mean(losses), np.mean(accuracies) if batch == n_batches - 1: test_accuracies.append(val_acc) test_losses.append(val_loss) print( f'Epoch: {epoch + 1:2}/{epochs}, batch: {batch + 1:4}/{n_batches}, train loss: {train_loss:8.3f}, ' f'train acc: {train_acc:8.3f}, test loss: {val_loss:8.3f}, test acc: {val_acc:8.3f}', flush=True) train_accuracies.append(np.mean(epoch_train_accuracies)) train_losses.append(np.mean(epoch_train_losses)) if cooling: optimizer.cooling_step() return train_accuracies, test_accuracies, train_losses, test_losses def build_plot(epochs, model_name, dataset_name, plot_path, train_acc, test_acc, train_loss, test_loss): plt.rcParams['figure.figsize'] = (20, 10) plt.rcParams['font.size'] = 25 epochs_range = np.arange(1, epochs + 1, dtype=int) plt.clf() fig, (ax1, ax2) = plt.subplots(1, 2) ax1.plot(epochs_range, train_acc, label='train') ax1.plot(epochs_range, test_acc, label='test') ax1.legend() ax1.set_xlabel('epoch') ax1.set_ylabel('accuracy') ax1.set_title('Accuracy') ax2.plot(epochs_range, train_loss, label='train') ax2.plot(epochs_range, test_loss, label='test') ax2.legend() ax2.set_xlabel('epoch') ax2.set_ylabel('loss') ax2.set_title('Loss') plt.suptitle(f'{model_name} @ {dataset_name}') plt.savefig(plot_path) if __name__ == '__main__': parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--model', type=str, default='SmallMLP', help=f'architecture to use', 
choices=list(MODELS.keys())) parser.add_argument('--dataset', type=str, default='MNIST', help='dataset to use', choices=list(DATASETS.keys())) parser.add_argument('--device', type=str, choices=['cuda', 'cpu'], default='cuda', help='whether to use GPU (cuda) for accelerated computations or not') parser.add_argument('--use_multiprocessing', action='store_true', help='specify to use multiprocessing for accelerating computations on CPU ' '(note, it is impossible to use multiprocessing with GPU)') parser.add_argument('--processes', type=int, default=4, help='how many processes to use for multiprocessing') parser.add_argument('--epochs', type=int, default=10, help='train for EPOCHS epochs') parser.add_argument('--batch_size', type=int, default=60, help='batch size (for samples-level batching)') parser.add_argument('--particles', type=int, default=100, help='') parser.add_argument('--particles_batch_size', type=int, default=10, help='batch size ' '(for particles-level batching)') parser.add_argument('--alpha', type=float, default=50, help='alpha from CBO dynamics') parser.add_argument('--sigma', type=float, default=0.4 ** 0.5, help='sigma from CBO dynamics') parser.add_argument('--l', type=float, default=1, help='lambda from CBO dynamics') parser.add_argument('--dt', type=float, default=0.1, help='dt from CBO dynamics') parser.add_argument('--anisotropic', type=bool, default=True, help='whether to use anisotropic or not') parser.add_argument('--eps', type=float, default=1e-5, help='threshold for additional random shift') parser.add_argument('--partial_update', type=bool, default=True, help='whether to use partial or full update') parser.add_argument('--cooling', type=bool, default=False, help='whether to apply cooling strategy') parser.add_argument('--build_plot', required=False, action='store_true', help='specify to build loss and accuracy plot') parser.add_argument('--plot_path', required=False, type=str, default='demo.png', help='path to save the resulting plot') parser.add_argument('--eval_freq', type=int, default=100, help='evaluate test accuracy every EVAL_FREQ ' 'samples-level batches') args = parser.parse_args() warnings.filterwarnings('ignore') model = MODELS[args.model]() train_dataloader, test_dataloader = DATASETS[args.dataset](train_batch_size=args.batch_size, test_batch_size=args.batch_size) device = args.device if args.device == 'cuda' and not torch.cuda.is_available(): print('Cuda is unavailable. Using CPU instead.') device = 'cpu' use_multiprocessing = args.use_multiprocessing if device != 'cpu' and use_multiprocessing: print('Unable to use multiprocessing on GPU') use_multiprocessing = False device = torch.device(device) print(f'Training {args.model} @ {args.dataset}') start_time = time.time() result = train(model, train_dataloader, test_dataloader, device, use_multiprocessing, args.processes, args.epochs, args.particles, args.particles_batch_size, args.alpha, args.sigma, args.l, args.dt, args.anisotropic, args.eps, args.partial_update, args.cooling, args.eval_freq) print(f'Elapsed time: {time.time() - start_time} seconds') if args.build_plot: build_plot(args.epochs, args.model, args.dataset, args.plot_path, *result)
Igor-Tukh/cbo-in-python
demo/torch_nn_demo.py
torch_nn_demo.py
py
8,219
python
en
code
3
github-code
6
1822828953
# BinarySearch
import math


def binsearch(n, arr, alen):
    # Precondition: arr must be sorted in ascending order
    print("Array to search:", arr)
    si = 0
    ei = alen - 1
    while si <= ei:
        m = math.ceil((si + ei) / 2)
        if n == arr[m]:
            print("\t\tFound {} at {}".format(n, m))
            return m
        elif n < arr[m]:
            print("\t\t{} < {}".format(n, arr[m]))
            ei = m - 1
        else:
            print("\t\t{} > {}".format(n, arr[m]))
            si = m + 1
        print("\n si:{}, ei:{}, mid:{}".format(si, ei, m))
    return -1


arr = [2, 4, 6, 8, 34, 56, 89]
n = 89
print(binsearch(n, arr, len(arr)))

# Note: "ahbgdc" is NOT sorted, so binary search is not guaranteed to find
# 'b' even though it is present; the -1 result below is expected.
arr = "ahbgdc"
n = "b"
print(binsearch(n, arr, len(arr)))
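
# A miss: once the window closes (si > ei) the loop exits and -1 is returned.
print(binsearch(5, [2, 4, 6, 8], 4))  # -1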
sushasru/LeetCodeCrunch
wwc_BinarySearch.py
wwc_BinarySearch.py
py
704
python
en
code
0
github-code
6
26625675366
from django import template import re try: from django.utils.safestring import mark_safe except ImportError: mark_safe = lambda s:s register = template.Library() def rfc3339_date(date): return date.strftime('%Y-%m-%dT%H:%M:%SZ') register.filter('atom_date', rfc3339_date) def atom_tag_uri(url, date=None): tag = re.sub('^https?://', '', url) if date: tag = re.sub('/', ',%s:/' % date.strftime('%Y-%m-%d'), tag, 1) tag = re.sub('#', '/', tag) return 'tag:' + tag register.filter('atom_tag_uri', atom_tag_uri) def feed_safe_name(name): return name.replace(' ', '_').lower() register.filter('feed_safe_name', feed_safe_name) GOOGLE_TAGS = ('actor', 'age', 'age_range', 'agent', 'area', 'artist', 'aspect_ratio', 'author', 'bathrooms', 'battery_life', 'bedrooms', 'binding', 'brand', 'broker', 'calories', 'capacity', 'cholesterol', 'color', 'color_output', 'condition', 'cooking_time', 'course', 'course_date_range', 'course_number', 'course_times', 'cuisine', 'currency', 'department', 'description', 'director', 'display_type', 'edition', 'education', 'employer', 'ethnicity', 'event_date_range', 'event_type', 'expiration_date', 'expiration_date_time', 'feature', 'fiber', 'film_type', 'focus_type', 'format', 'from_location', 'functions', 'gender', 'genre', 'heel_height', 'height', 'hoa_dues', 'id', 'image_link', 'immigration_status', 'installation', 'interested_in', 'isbn', 'job_function', 'job_industry', 'job_type', 'language', 'length', 'link', 'listing_status', 'listing_type', 'load_type', 'location', 'lot_size', 'made_in', 'main_ingredient', 'make', 'marital_status', 'material', 'meal_type', 'megapixels', 'memory_card_slot', 'mileage', 'mls_listing_id', 'mls_name', 'model', 'model_number', 'mpn', 'name_of_item_reviewed', 'news_source', 'occasion', 'occupation', 'open_house_date_range', 'operating_system', 'optical_drive', 'pages', 'payment_accepted', 'payment_notes', 'performer', 'pickup', 'platform', 'preparation_time', 'price', 'price_type', 'processor_speed', 'product_type', 'property_taxes', 'property_type', 'protein', 'provider_class', 'provider_name', 'provider_telephone_number', 'publication_name', 'publication_volume', 'publish_date', 'publisher', 'quantity', 'rating', 'recommended_usage', 'resolution', 'review_type', 'reviewer_type', 'salary', 'salary_type', 'saturated_fat', 'school', 'school_district', 'screen_size', 'service_type', 'servings', 'sexual_orientation', 'shipping', 'shoe_width', 'size', 'sleeps', 'sodium', 'style', 'subject', 'tax_percent', 'tax_region', 'tech_spec_link', 'title', 'to_location', 'total_carbs', 'total_fat', 'travel_date_range', 'university', 'upc', 'url_of_item_reviewed', 'vehicle_type', 'venue_description', 'venue_name', 'venue_type', 'venue_website', 'vin', 'weight', 'width', 'wireless_interface', 'year', 'zoning', 'zoom' ) def make_googlebase_option(opt, custom): """Convert an option into a tag. First look to see if it is a predefined tag, if it is, good, use it. Otherwise make a custom tag.""" custom = custom.lower() in ('true','t','1') return make_googlebase_tag(opt.option_group.name, opt.name,custom) register.filter('make_googlebase_option', make_googlebase_option) def make_googlebase_attribute(att, custom): """Convert an attribute into a tag. First look to see if it is a predefined tag, if it is, good, use it. 
Otherwise make a custom tag.""" custom = custom.lower() in ('true','t','1') return make_googlebase_tag(att.name, att.value, custom) register.filter('make_googlebase_attribute', make_googlebase_attribute) def make_googlebase_tag(key, val, custom): """Convert a key/val pair into a tag. First look to see if it is a predefined tag, if it is, good, use it. Otherwise make a custom tag.""" key = feed_safe_name(key) if key in GOOGLE_TAGS: tag = "<g:%s>%s</g:%s>" elif key.endswith('s') and key[:-1] in GOOGLE_TAGS: key = key[:-1] tag = "<g:%s>%s</g:%s>" elif custom: tag = "<c:%s:string>%s</c:%s:string>" else: tag = None if tag: return mark_safe(tag % (key, val, key)) else: return "" def stripspaces(s): s = re.sub(r'^\s+', '', s) s = re.sub(r'\s+$', '', s) s = s.replace('\n\n','\n') return s register.filter('stripspaces', stripspaces)
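

# Illustrative behaviour of the tag helpers above (shown as comments so
# nothing executes at import time in this templatetags module):
#   make_googlebase_tag('color', 'red', False)  -> '<g:color>red</g:color>'
#   make_googlebase_tag('sizes', 'XL', False)   -> '<g:size>XL</g:size>'  (trailing 's' stripped)
#   make_googlebase_tag('my key', 'hi', True)   -> '<c:my_key:string>hi</c:my_key:string>'
#   make_googlebase_tag('my key', 'hi', False)  -> ''  (unknown key, custom disabled)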
dokterbob/satchmo
satchmo/apps/satchmo_ext/product_feeds/templatetags/satchmo_feed.py
satchmo_feed.py
py
4,527
python
en
code
30
github-code
6
26712172198
#!/usr/bin/env python3
# -*- coding: utf-8 -*-


if __name__ == '__main__':
    s = "Доброе утро, товарищ"  # "Good morning, comrade"
    # Note: the check looks for the Cyrillic letter 'а', which occurs in the
    # Russian string above; the strings are kept as-is so the logic still works.
    if 'а' in s:
        # "Ordinal position of the first letter 'а': ..."
        print(f"Порядковый номер первой буквы а: {s.find('а') + 1}")
    else:
        # "The sentence contains no letter 'а'"
        print("В предложении нет буквы а")
BorsukovVladislav/LR6
PyCharm/Individual/Task2.py
Task2.py
py
312
python
ru
code
0
github-code
6
40466806630
import pyvirtualcam import cv2 import time from filters import Filters import math from datetime import datetime import ML.HandTrackingModule as htm class VCam: def __init__(self, mxhand, video, f, detCon=0.5, cw=640, ch=480, du=True): cv2.namedWindow('feedback') self.videocap = video self.filterList = ['normal', 'negative', 'bgr2gray'] self.filterIndex = 0 self.inputKey = -1 # Utils self.toDU = du self.nextX, self.nextY = cw - 40, ch // 2 self.prevX, self.prevY = 40, ch // 2 self.escX, self.escY = cw - 40, 40 self.radius = 40 # Hand tracking self.detector = htm.HandDetector(detectionCon=detCon, maxHands=mxhand) self.finger = f self.pressing = False self.initialTime = datetime.timestamp(datetime.now()) self.vc = cv2.VideoCapture(self.videocap) if not self.vc.isOpened(): raise RuntimeError('Can\'t open your camera, please check if videocap is validy device, try using "v4l2-ctl --list-device"') self.vc.set(cv2.CAP_PROP_FRAME_WIDTH, cw) self.vc.set(cv2.CAP_PROP_FRAME_HEIGHT, ch) self.vc.set(cv2.CAP_PROP_FPS, 30) # Query final capture device values (may be different from preferred settings). self.width = int(self.vc.get(cv2.CAP_PROP_FRAME_WIDTH)) self.height = int(self.vc.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.fps_out = self.vc.get(cv2.CAP_PROP_FPS) self.ret, self.frame = self.vc.read() if not self.ret: raise RuntimeError('Error fetching frame') self.display = True def start(self): with pyvirtualcam.Camera(self.width, self.height, self.fps_out, print_fps=False, fmt=pyvirtualcam.PixelFormat.BGR,) as cam: print(f'Virtual cam started: {cam.device} ({cam.width}x{cam.height} @ {cam.fps}fps)') pTime, cTime = 0,0 while self.display: # Read frame from webcam. self.ret, self.frame = self.vc.read() self.frame = cv2.flip(self.frame, 1) if not self.ret: raise RuntimeError('Error fetching frame') # Hand track control self.handCommands() self.inputKey = cv2.waitKey(1) if self.inputKey != -1: self.camInputs() filter = self.filterList[self.filterIndex] self.frame = getattr(Filters, filter)(self.frame) cTime = time.time() fps = int(1 / (cTime - pTime)) pTime = cTime cv2.putText(self.frame, str(fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) cam.send(self.frame) if self.toDU: self.drawUtils() self.detector.drawMarks(self.frame, drawFingerMark=[self.finger]) cv2.imshow('feedback', self.frame) print('Virtual camera closed') def handCommands(self): self.detector.findHands(self.frame) lmList, bbox = self.detector.findPosition(self.frame) if lmList: fingerX, fingerY = lmList[self.finger][1], lmList[self.finger][2] init = datetime.timestamp(datetime.now()) # next filter if math.hypot(fingerX - self.nextX, fingerY - self.nextY) <= 30: actual = datetime.timestamp(datetime.now()) if not self.pressing: self.pressing = True self.initialTime = init else: presstime = actual - self.initialTime if presstime >= 1: self.filterIndex = (self.filterIndex + 1) % len(self.filterList) self.pressing = False # previus filter elif math.hypot(fingerX - self.prevX, fingerY - self.prevY) <= 30: actual = datetime.timestamp(datetime.now()) if not self.pressing: self.pressing = True self.initialTime = init else: presstime = actual - self.initialTime if presstime >= 1: self.filterIndex = (self.filterIndex - 1) % len(self.filterList) self.pressing = False # close cam elif math.hypot(fingerX - self.escX, fingerY - self.escY) <= 30: actual = datetime.timestamp(datetime.now()) if not self.pressing: self.pressing = True self.initialTime = init else: presstime = actual - self.initialTime if presstime >= 2: self.vc.release() self.display 
= False self.pressing = False else: self.pressing = False self.initialTime = init def camInputs(self): # ESC if self.inputKey == 27: cv2.destroyWindow('feedback') self.vc.release() self.display = False # [ elif self.inputKey == 91: self.filterIndex = (self.filterIndex - 1) % len(self.filterList) # ] elif self.inputKey == 93: self.filterIndex = (self.filterIndex + 1) % len(self.filterList) def drawUtils(self): # Drawing area for hand tracker commands cv2.circle(self.frame, (self.nextX, self.nextY), self.radius, (255, 0, 0)) cv2.circle(self.frame, (self.prevX, self.prevY), self.radius, (255, 0, 0)) cv2.circle(self.frame, (self.escX, self.escY), self.radius, (255, 0, 0))
biguelito/funcam
vcam.py
vcam.py
py
5,797
python
en
code
0
github-code
6
30970248515
from euphorie.content.browser.country import ManageUsers from euphorie.content.countrymanager import ICountryManager from euphorie.content.sector import ISector class OSHAManageUsers(ManageUsers): @property def sectors(self): sectors_list = [] for sector in self.country.values(): if not ISector.providedBy(sector): continue entry = { "id": sector.id, "login": sector.login, "password": sector.password, "title": sector.title, "url": sector.absolute_url(), "locked": sector.locked, "contact_email": sector.contact_email, } view = sector.restrictedTraverse("manage-ldap-users", None) if not view: entry["managers"] = [] else: entry["managers"] = [ userid for userid in view.local_roles_userids() if view.get_user(userid) ] sectors_list.append(entry) sectors_list.sort(key=lambda s: s["title"].lower()) return sectors_list @property def managers(self): managers_list = [ { "id": manager.id, "login": manager.login, "title": manager.title, "url": manager.absolute_url(), "locked": manager.locked, "contact_email": manager.contact_email, } for manager in self.country.values() if ICountryManager.providedBy(manager) ] managers_list.sort(key=lambda s: s["title"].lower()) return managers_list
euphorie/osha.oira
src/osha/oira/content/browser/country.py
country.py
py
1,736
python
en
code
4
github-code
6
10981447634
''' Created on Oct 26, 2015 @author: jcheung Developed for Python 2. May work for Python 3 too (but I never tried) with minor changes. ''' import xml.etree.cElementTree as ET import codecs class WSDInstance: def __init__(self, my_id, lemma, context, index): self.id = my_id # id of the WSD instance self.lemma = lemma # lemma of the word whose sense is to be resolved self.context = context # lemma of all the words in the sentential context self.index = index # index of lemma within the context def __str__(self): ''' For printing purposes. ''' return '%s\t%s\t%s\t%d' % (self.id, self.lemma, ' '.join(self.context), self.index) def load_instances(f): ''' Load two lists of cases to perform WSD on. The structure that is returned is a dict, where the keys are the ids, and the values are instances of WSDInstance. ''' tree = ET.parse(f) root = tree.getroot() dev_instances = {} test_instances = {} for text in root: if text.attrib['id'].startswith('d001'): instances = dev_instances else: instances = test_instances for sentence in text: # construct sentence context context = [to_ascii(el.attrib['lemma']) for el in sentence] for i, el in enumerate(sentence): if el.tag == 'instance': my_id = el.attrib['id'] lemma = to_ascii(el.attrib['lemma']) instances[my_id] = WSDInstance(my_id, lemma, context, i) return dev_instances, test_instances def load_key(f): ''' Load the solutions as dicts. Key is the id Value is the list of correct sense keys. ''' dev_key = {} test_key = {} for line in open(f): if len(line) <= 1: continue #print (line) doc, my_id, sense_key = line.strip().split(' ', 2) if doc == 'd001': dev_key[my_id] = sense_key.split() else: test_key[my_id] = sense_key.split() return dev_key, test_key def to_ascii(s): # remove all non-ascii characters return codecs.encode(s, 'ascii', 'ignore')
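

# Hypothetical usage (the file names below are placeholders, not part of the
# original assignment data):
#
#   dev_instances, test_instances = load_instances('multilingual-all-words.en.xml')
#   dev_key, test_key = load_key('wordnet.en.key')
#   print(len(dev_instances), len(test_instances))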
JGuymont/lesk-algorithm
lesk/loader.py
loader.py
py
2,227
python
en
code
3
github-code
6
31277293302
import unittest
from unittest.mock import Mock

from src.display.point import Point
from src.display.window import Window


class TestPoint(unittest.TestCase):
    def test_draw(self):
        # Given
        display = Mock()
        window = Window(Point.Zero, 10, 10, display)
        # When
        window.draw('hello world', Point.Zero)
        # Then
        display.draw.assert_called_once()

    def test_draw_pointHasOffset(self):
        # Given
        display = Mock()
        window = Window(Point.One, 10, 10, display)
        # When
        window.draw('hello world', Point.Zero)
        # Then
        display.draw.assert_called_once_with('hello world', Point.One)

    def test_write_callsDraw(self):
        # Given
        display = Mock()
        window = Window(Point.One, 10, 10, display)
        # When
        window.writeLine('hi')
        # Then
        display.draw.assert_called_once_with('hi', Point.Zero)

    def test_draw_withPointZero(self):
        # Given
        display = Mock()
        window = Window(Point.One, 10, 10, display)
        # When
        # The original referenced an undefined name `Position.Zero`;
        # only Point is imported, so Point.Zero is used here.
        window.draw('hi', Point.Zero)
        # Then
        display.draw.assert_called_once_with('hi', Point.Zero)

    def test_write_None_doesNotCallDraw(self):
        # Given
        display = Mock()
        window = Window(Point.One, 10, 10, display)
        # When
        window.writeLine(None)
        # Then
        display.draw.assert_not_called()


if __name__ == '__main__':
    unittest.main()
TrevorVonSeggern/gcode-terminal
test/testWindow.py
testWindow.py
py
1,503
python
en
code
0
github-code
6
22773337009
#!/usr/local/epics/modules/pythonIoc/pythonIoc from softioc import softioc, builder from netmon import netmon net = netmon("switch_list.csv") builder.LoadDatabase() softioc.iocInit() net.start_monit_loop() softioc.interactive_ioc(globals())
star-controls/network-switch-monitor
main.py
main.py
py
247
python
en
code
0
github-code
6
70808563069
import torch

from .assign_result import AssignResult
from .base_assigner import BaseAssigner


def calc_region(bbox, ratio, stride, featmap_size=None):
    # Base anchor locates in (stride - 1) * 0.5
    f_bbox = (bbox - (stride - 1) * 0.5) / stride
    x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])
    y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])
    x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])
    y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
        y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
        x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
        y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
    return (x1, y1, x2, y2)


def anchor_ctr_inside_region_flags(anchors, stride, region):
    x1, y1, x2, y2 = region
    f_anchors = (anchors - (stride - 1) * 0.5) / stride
    x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5
    y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5
    flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)
    return flags


def anchor_outside_flags(flat_anchors,
                         valid_flags,
                         img_shape,
                         allowed_border=0):
    img_h, img_w = img_shape[:2]
    if allowed_border >= 0:
        inside_flags = valid_flags & \
            (flat_anchors[:, 0] >= -allowed_border) & \
            (flat_anchors[:, 1] >= -allowed_border) & \
            (flat_anchors[:, 2] < img_w + allowed_border) & \
            (flat_anchors[:, 3] < img_h + allowed_border)
    else:
        inside_flags = valid_flags
    outside_flags = ~inside_flags
    return outside_flags


class RegionAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned `-1`, `0`, or a positive integer
    indicating the ground truth index.

    - -1: don't care
    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum IoU for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each
            gt).
    """

    # TODO update docs
    def __init__(self, center_ratio=0.2, ignore_ratio=0.5):
        self.center_ratio = center_ratio
        self.ignore_ratio = ignore_ratio

    def assign(self,
               mlvl_anchors,
               mlvl_valid_flags,
               gt_bboxes,
               img_meta,
               featmap_sizes,
               anchor_scale,
               anchor_strides,
               gt_bboxes_ignore=None,
               gt_labels=None,
               allowed_border=0):
        """Assign gt to anchors.

        This method assigns a gt bbox to every bbox (proposal/anchor); each
        bbox will be assigned -1, 0, or a positive number. -1 means don't
        care, 0 means negative sample, and a positive number is the index
        (1-based) of the assigned gt.

        The assignment is done in the following steps, and the order matters:

        1. Assign every anchor to 0 (negative)
        For each gt bbox:
            2. Compute ignore flags based on ignore_region, then assign -1 to
               anchors w.r.t. ignore flags
            3. Compute pos flags based on center_region, then assign
               gt_bboxes to anchors w.r.t. pos flags
            4. Compute ignore flags based on adjacent anchor lvl, then assign
               -1 to anchors w.r.t. ignore flags
        5. Assign anchors outside of the image to -1

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape (n, 4).
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        # TODO support gt_bboxes_ignore
        if gt_bboxes_ignore is not None:
            raise NotImplementedError

        if gt_bboxes.shape[0] == 0:
            raise ValueError('No gt bboxes')

        num_gts = gt_bboxes.shape[0]
        num_lvls = len(mlvl_anchors)

        r1 = (1 - self.center_ratio) / 2
        r2 = (1 - self.ignore_ratio) / 2

        scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) *
                           (gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1))
        min_anchor_size = scale.new_full(
            (1, ), float(anchor_scale * anchor_strides[0]))
        target_lvls = torch.floor(
            torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
        target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()

        # 1. assign 0 (negative) by default
        mlvl_assigned_gt_inds = []
        mlvl_ignore_flags = []
        for lvl in range(num_lvls):
            h, w = featmap_sizes[lvl]
            assert h * w == mlvl_anchors[lvl].shape[0]
            assigned_gt_inds = gt_bboxes.new_full((h * w, ),
                                                  0,
                                                  dtype=torch.long)
            ignore_flags = torch.zeros_like(assigned_gt_inds)
            mlvl_assigned_gt_inds.append(assigned_gt_inds)
            mlvl_ignore_flags.append(ignore_flags)

        for gt_id in range(num_gts):
            lvl = target_lvls[gt_id].item()
            featmap_size = featmap_sizes[lvl]
            stride = anchor_strides[lvl]
            anchors = mlvl_anchors[lvl]
            gt_bbox = gt_bboxes[gt_id, :4]

            # Compute regions
            ignore_region = calc_region(gt_bbox, r2, stride, featmap_size)
            ctr_region = calc_region(gt_bbox, r1, stride, featmap_size)

            # 2. Assign -1 to ignore flags
            ignore_flags = anchor_ctr_inside_region_flags(
                anchors, stride, ignore_region)
            mlvl_assigned_gt_inds[lvl][ignore_flags > 0] = -1

            # 3. Assign gt_bboxes to pos flags
            pos_flags = anchor_ctr_inside_region_flags(anchors, stride,
                                                       ctr_region)
            mlvl_assigned_gt_inds[lvl][pos_flags > 0] = gt_id + 1

            # 4. Assign -1 to ignore adjacent lvl
            # Note: calc_region expects (bbox, ratio, stride, featmap_size);
            # the original calls below passed the stride before the ratio.
            if lvl > 0:
                d_lvl = lvl - 1
                d_anchors = mlvl_anchors[d_lvl]
                d_featmap_size = featmap_sizes[d_lvl]
                d_stride = anchor_strides[d_lvl]
                d_ignore_region = calc_region(gt_bbox, r2, d_stride,
                                              d_featmap_size)
                ignore_flags = anchor_ctr_inside_region_flags(
                    d_anchors, d_stride, d_ignore_region)
                mlvl_ignore_flags[d_lvl][ignore_flags > 0] = 1
            if lvl < num_lvls - 1:
                u_lvl = lvl + 1
                u_anchors = mlvl_anchors[u_lvl]
                u_featmap_size = featmap_sizes[u_lvl]
                u_stride = anchor_strides[u_lvl]
                u_ignore_region = calc_region(gt_bbox, r2, u_stride,
                                              u_featmap_size)
                ignore_flags = anchor_ctr_inside_region_flags(
                    u_anchors, u_stride, u_ignore_region)
                mlvl_ignore_flags[u_lvl][ignore_flags > 0] = 1

        # 4. (cont.) Assign -1 to ignore adjacent lvl
        for lvl in range(num_lvls):
            ignore_flags = mlvl_ignore_flags[lvl]
            mlvl_assigned_gt_inds[lvl][ignore_flags > 0] = -1

        # 5. Assign -1 to anchor outside of image
        flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds)
        flat_anchors = torch.cat(mlvl_anchors)
        flat_valid_flags = torch.cat(mlvl_valid_flags)
        assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] ==
                flat_valid_flags.shape[0])
        outside_flags = anchor_outside_flags(flat_anchors, flat_valid_flags,
                                             img_meta['img_shape'],
                                             allowed_border)
        flat_assigned_gt_inds[outside_flags] = -1

        if gt_labels is not None:
            assigned_labels = torch.zeros_like(flat_assigned_gt_inds)
            # Use the flattened assignment here; the original read
            # `assigned_gt_inds`, the per-level tensor left over from the
            # loop above, whose length does not match assigned_labels.
            pos_flags = flat_assigned_gt_inds > 0
            assigned_labels[pos_flags] = gt_labels[
                flat_assigned_gt_inds[pos_flags] - 1]
        else:
            assigned_labels = None

        return AssignResult(
            num_gts, flat_assigned_gt_inds, None, labels=assigned_labels)
thangvubk/Cascade-RPN
mmdet/core/bbox/assigners/region_assigner.py
region_assigner.py
py
8,816
python
en
code
177
github-code
6
38314545769
from src.web.fetch import Fetch class Coupang: URL = "https://www.coupang.com" _REQUEST_HEADERS = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)" "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36", } _COOKIES = { # Update PCID when coupang is not working. PCID can be found in the network tab of the browser console. "PCID": "10790271030004111726842", } @classmethod def get_comment(cls, opt): link = '/vp/product/reviews' res = Fetch.post(Coupang.URL + link, data={ 'productId': '5647481827', 'page': 1, 'size': 5, 'sortBy': 'DATE_DESC', 'ratings': 'null', 'q': '', 'viRoleCode': 0, 'ratingSummary': 'true' }, skip_auto_headers=Coupang._REQUEST_HEADERS, cookies=Coupang._COOKIES) print(res) if __name__ == "__main__": Coupang.get_comment({})
jshyunbin/comment_crawler
src/mall/coupang.py
coupang.py
py
994
python
en
code
2
github-code
6
2857680026
import operator
from functools import reduce  # Required in Python 3

import numpy as np
import torch


def prod(iterable):
    return reduce(operator.mul, iterable, 1)


def multi_index_to_single(tensor, index):
    # Row-major (C order) raveling, the torch analogue of np.ravel_multi_index:
    # index_flat = sum_i index[i] * prod(tensor.shape[i + 1:]).
    return torch.stack([
        index[i] * prod([tensor.shape[j] for j in range(i + 1, tensor.ndim)])
        for i in range(len(index))
    ]).sum(0)


def add_at(tensor_a, index, tensor_b):
    # In-place scatter-add, the torch analogue of np.add.at.
    index_flat = multi_index_to_single(tensor_a, index)
    return tensor_a.flatten().index_add_(0, index_flat, tensor_b.flatten()).reshape(tensor_a.shape)


# Sanity check against np.add.at on complex data. The real and imaginary
# parts are accumulated separately because index_add_ works on real tensors.
gvi = np.zeros([2300, 2300], dtype=complex)
visg = 141988016 * np.random.randn(5982336) + 4067017 + 22521834j * np.random.randn(5982336) + 48913j
undxi = np.random.randint(0, 2300, size=(len(visg)))
vndxi = np.random.randint(0, 2300, size=(len(visg)))

# Clone so the torch copy does not share memory with the numpy array that
# np.add.at mutates below (torch.from_numpy alone would alias the buffer).
gvi_t = torch.from_numpy(gvi).clone()
np.add.at(gvi, (undxi, vndxi), visg)

visg_t = torch.from_numpy(visg)
undxi = torch.from_numpy(undxi)
vndxi = torch.from_numpy(vndxi)

gvi_tr = gvi_t.real
gvi_ti = gvi_t.imag
visg_tr = visg_t.real
visg_ti = visg_t.imag

add_at(gvi_tr, (undxi, vndxi), visg_tr)
add_at(gvi_ti, (undxi, vndxi), visg_ti)
gvi_t = torch.view_as_complex(torch.stack([gvi_tr, gvi_ti], dim=-1))

assert np.isclose(gvi_t, gvi).all()
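
# Worked example (illustrative): for a tensor of shape (3, 4), the multi-index
# (1, 2) ravels to 1 * 4 + 2 = 6, matching np.ravel_multi_index((1, 2), (3, 4)):
#     t = torch.zeros(3, 4)
#     multi_index_to_single(t, (torch.tensor([1]), torch.tensor([2])))  # tensor([6])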
DavidRuhe/interferometry
src/gridding_python_improved/add_at.py
add_at.py
py
1,989
python
en
code
0
github-code
6
11924464860
# Definition for singly-linked list. # class ListNode: # def __init__(self, val=0, next=None): # self.val = val # self.next = next class Solution: def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]: if head: return_node = ListNode(head.val) curr_node = head.next while curr_node: return_node = ListNode(curr_node.val, return_node) curr_node = curr_node.next return return_node else: return head
jasonxchen/leetcode
0206-reverse-linked-list/0206-reverse-linked-list.py
0206-reverse-linked-list.py
py
546
python
en
code
2
github-code
6
14764098844
import sys
from typing import List, Tuple


def _get_element_orders(arr: List[int], key: int) -> Tuple[List[int], List[int], int]:
    """
    return two lists - one of figures less than key and
    one of those greater - and the count of key in arr
    """

    less, greater, equal = [], [], 0
    for i in arr:
        if i < key:
            less.append(i)
        elif i > key:
            greater.append(i)
        else:
            equal += 1
    return less, greater, equal


def _get_majority_element(arr: List[int], majority_size: float) -> int:
    """
    return the value of the majority element of arr if such an
    element exists, and return -1 if no such element exists
    """

    less_than, greater_than, equal = _get_element_orders(arr, arr[0])
    if equal > majority_size:
        return arr[0]
    elif len(less_than) > majority_size:
        return _get_majority_element(less_than, majority_size)
    elif len(greater_than) > majority_size:
        return _get_majority_element(greater_than, majority_size)
    else:
        return -1


def get_majority_element(arr: List[int]) -> int:
    """
    wrapper function for _get_majority_element which
    calculates the majority size
    """

    majority_size = len(arr) / 2
    return _get_majority_element(arr, majority_size)


if __name__ == '__main__':
    input_data = sys.stdin.read()
    n, *a = list(map(int, input_data.split()))
    if get_majority_element(a) != -1:
        print(1)
    else:
        print(0)
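
# Worked example: in [2, 2, 1, 2] the majority threshold is 4 / 2 = 2.0 and the
# pivot 2 occurs three times, so get_majority_element([2, 2, 1, 2]) returns 2;
# in [1, 2, 3, 4] no value clears the threshold and the result is -1.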
JoeLove100/data-structures-and-algorithms
divide_and_conquer/majority_element.py
majority_element.py
py
1,557
python
en
code
0
github-code
6
22728666874
from imageai.Detection import ObjectDetection import os import sys prices = { 'bottle' : 11, 'apple' : 20, 'orange' : 20, 'sandwich' : 20, 'hot_dog' : 20, 'pizza' : 20, 'donut' : 20, 'cake' : 20 } def processImage(input_file,output_file) : os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' execution_path = os.getcwd() # input_file = sys.argv[1] # output_file = sys.argv[2] detector = ObjectDetection() detector.setModelTypeAsYOLOv3() detector.setModelPath( os.path.join(execution_path , "yolo.h5")) detector.loadModel() custom_objects = detector.CustomObjects(bottle=True, apple=True, orange=True, sandwich=True, hot_dog=True, pizza=True, donut=True, cake=True) detections = detector.detectCustomObjectsFromImage(custom_objects=custom_objects, input_image=os.path.join(execution_path , input_file), output_image_path=os.path.join(execution_path , output_file), minimum_percentage_probability=30) totalPrice = 0 for eachObject in detections: totalPrice = totalPrice + prices[eachObject["name"]] # print(eachObject["name"] , " : ", eachObject["percentage_probability"], " : ", eachObject["box_points"] , " : " , prices[eachObject["name"]] , " Baht" ) # print("================================") return ( totalPrice , output_file ) result = processImage(sys.argv[1],sys.argv[2]) print("result totalPrice",result[0])
sawatdee/AI-image-processing
home/libs/shop_detection.py
shop_detection.py
py
1,461
python
en
code
0
github-code
6
15135490887
#!/usr/bin/env python2 from psychopy import core, visual, event #create a window to draw in myWin = visual.Window([400,400.0], allowGUI=False) #INITIALISE SOME STIMULI gabor = visual.GratingStim(myWin,tex="sin",mask="gauss",texRes=256, size=[1.0,1.0], sf=[4,0], ori = 0, name='gabor1') gabor.autoDraw = True message = visual.TextStim(myWin,pos=(0.0,-0.9),text='Hit Q to quit') trialClock = core.Clock() #repeat drawing for each frame while trialClock.getTime()<20: gabor.phase += 0.01 message.draw() #handle key presses each frame if event.getKeys(keyList=['escape','q']): myWin.close() core.quit() myWin.flip()
honeymustard33/experiment_riskdetection
project/psycho/psychopy/demos/coder/stimuli/gabor.py
gabor.py
py
671
python
en
code
0
github-code
6
31015459461
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tensorflow as tf
import numpy as np
import tf_util
from pointnet_util import pointnet_sa_module, pointnet_fp_module

def placeholder_inputs(batch_size, num_point):
    pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 4))  # 4 = xyz + intensity
    labels_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point))
    smpws_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point))
    return pointclouds_pl, labels_pl, smpws_pl

def get_model(point_cloud, is_training, num_class, bn_decay=None, keep_prob=0.9):
    """ Semantic segmentation PointNet, input is BxNx3 (xyz) or BxNx4 (xyz + intensity), output BxNxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    if(point_cloud.get_shape()[2] == 3):
        l0_xyz = point_cloud
        l0_points = None
        end_points['l0_xyz'] = l0_xyz
    elif(point_cloud.get_shape()[2] == 4):
        l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])  # xyz BxNx3
        l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 1])  # i BxNx1
        end_points['l0_xyz'] = l0_xyz

    # Layer 1
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.4, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.8, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=1.6, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=3.2, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=keep_prob, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points

def get_loss(pred, label, smpw):
    """ pred: BxNxC, label: BxN, smpw: BxN """
    classify_loss = tf.losses.sparse_softmax_cross_entropy(labels=label, logits=pred, weights=smpw)
    tf.summary.scalar('classify loss', classify_loss)
    tf.add_to_collection('losses', classify_loss)
    return classify_loss

def get_iou_loss(pred, label):
    label = tf.one_hot(label, pred.shape[2])
    pred = tf.nn.softmax(pred)
    inter = tf.reduce_sum(tf.multiply(pred, label), [0, 1])  # keep the class dimension
    union = tf.reduce_sum(tf.subtract(tf.add(pred, label), tf.multiply(pred, label)), [0, 1]) + 1e-12
    loss = -tf.log(tf.reduce_mean(tf.div(inter, union)))  # per-class IoU first, then the mean
    return loss

def get_iou_loss_with_num(pred, label, num):
    label = tf.one_hot(label, pred.shape[2])
    pred = tf.nn.softmax(pred)
    inter = tf.reduce_sum(tf.multiply(pred, label), [0, 1])  # keep the class dimension
    union = tf.reduce_sum(tf.subtract(tf.add(pred, label), tf.multiply(pred, label)), [0, 1]) + 1e-12
    loss = -tf.log(8 / num * tf.reduce_mean(tf.div(inter, union)))  # per-class IoU first, then the mean
    return loss

def get_iou(pred, label, n_class):
    """Compute a soft, pixel-level IoU.

    Args:
      pred: logits of shape [batch, N, n_class].
      label: one-hot encoded labels with the same shape as pred.
      n_class: number of classes.

    Returns:
      iou: tensor of length n_class with the soft IoU of each class.
      mean_iou: scalar mean over classes.
    """
    pred = tf.nn.softmax(pred)
    inter = tf.reduce_sum(tf.multiply(pred, label), [0, 1])  # keep the class dimension
    union = tf.reduce_sum(tf.subtract(tf.add(pred, label), tf.multiply(pred, label)), [0, 1]) + 1e-12
    iou = tf.div(inter, union)  # per-class IoU first, then the mean
    mean_iou = tf.reduce_mean(iou)
    return iou, mean_iou

if __name__ == '__main__':
    with tf.Graph().as_default():
        inputs = tf.zeros((32, 1024, 4))  # batch 32, 1024 points, xyz + intensity
        net, _ = get_model(inputs, tf.constant(True), 8)
        print(net)
kxhit/BDCI2018-pointnet-
models/pointnet2_sem_seg_xyzi_final.py
pointnet2_sem_seg_xyzi_final.py
py
8,126
python
en
code
6
github-code
6
74494631546
""" Coin recognition, real life application task: calculate the value of coins on picture """ import cv2 import numpy as np def detect_coins(): coins = cv2.imread('../input_image/koruny.jpg', 1) gray = cv2.cvtColor(coins, cv2.COLOR_BGR2GRAY) img = cv2.medianBlur(gray, 7) circles = cv2.HoughCircles( img, # source image cv2.HOUGH_GRADIENT, # type of detection 1, 50, param1=100, param2=50, minRadius=10, # minimal radius maxRadius=380, # max radius ) coins_copy = coins.copy() for detected_circle in circles[0]: x_coor, y_coor, detected_radius = detected_circle coins_detected = cv2.circle( coins_copy, (int(x_coor), int(y_coor)), int(detected_radius), (0, 255, 0), 4, ) cv2.imwrite("../output_image/coin_amount/koruny_test_Hough.jpg", coins_detected) return circles def calculate_amount(): koruny = { "1 CZK": { "value": 1, "radius": 20, "ratio": 1, "count": 0, }, "2 CZK": { "value": 2, "radius": 21.5, "ratio": 1.075, "count": 0, }, "5 CZK": { "value": 5, "radius": 23, "ratio": 1.15, "count": 0, }, "10 CZK": { "value": 10, "radius": 24.5, "ratio": 1.225, "count": 0, }, "20 CZK": { "value": 20, "radius": 26, "ratio": 1.3, "count": 0, }, "50 CZK": { "value": 50, "radius": 27.5, "ratio": 1.375, "count": 0, }, } circles = detect_coins() radius = [] coordinates = [] for detected_circle in circles[0]: x_coor, y_coor, detected_radius = detected_circle radius.append(detected_radius) coordinates.append([x_coor, y_coor]) smallest = min(radius) tolerance = 0.0375 total_amount = 0 coins_circled = cv2.imread('../output_image/coin_amount/koruny_test_Hough.jpg', 1) font = cv2.FONT_HERSHEY_SIMPLEX for coin in circles[0]: ratio_to_check = coin[2] / smallest coor_x = coin[0] coor_y = coin[1] for koruna in koruny: value = koruny[koruna]['value'] if abs(ratio_to_check - koruny[koruna]['ratio']) <= tolerance: koruny[koruna]['count'] += 1 total_amount += koruny[koruna]['value'] cv2.putText(coins_circled, str(value), (int(coor_x), int(coor_y)), font, 1, (0, 0, 0), 4) print(f"The total amount is {total_amount} CZK") for koruna in koruny: pieces = koruny[koruna]['count'] print(f"{koruna} = {pieces}x") cv2.imwrite("../output_image/coin_amount/koruny_hodnota.jpg", coins_circled) if __name__ == "__main__": calculate_amount()
tinazhouhui/computer_vision
image_analysis/coin_amount_calculate.py
coin_amount_calculate.py
py
3,054
python
en
code
1
github-code
6
35508636359
#START{ import os from github import Github import json import sys import re import time from tabulate import tabulate def clone_repos(GITHUB_ACCESS_TOKEN,GITHUB_USERNAME): g = Github(GITHUB_ACCESS_TOKEN) # Create "repos" folder if it doesn't exist if not os.path.exists("repos"): os.makedirs("repos") # Create "public" and "private" folders within "repos" public_folder = os.path.join("repos", "public") private_folder = os.path.join("repos", "private") if not os.path.exists(public_folder): os.makedirs(public_folder) if not os.path.exists(private_folder): os.makedirs(private_folder) # Clone all public repositories owned by the user for repo in g.get_user().get_repos(affiliation='owner'): if not repo.private: os.makedirs(os.path.join(public_folder), exist_ok=True) if os.path.exists(os.path.join(public_folder, repo.name)): pass else: os.system(f"git clone {repo.clone_url} {os.path.join(public_folder, repo.name)}") os.system(f"rm -rf {os.path.join(public_folder, repo.name, '.git')}") # Clone all private repositories owned by the user for repo in g.get_user().get_repos(affiliation='owner'): if repo.private: os.makedirs(os.path.join(private_folder), exist_ok=True) # Include the access token and username in the clone URL to avoid being prompted for them if os.path.exists(os.path.join(private_folder, repo.name)): pass else: os.system(f"git clone https://{GITHUB_USERNAME}:{GITHUB_ACCESS_TOKEN}@{repo.clone_url.split('://')[1]} {os.path.join(private_folder, repo.name)}") os.system(f"rm -rf {os.path.join(private_folder, repo.name, '.git')}") def is_binary_file(filepath): with open(filepath, 'rb') as f: chunk = f.read(1024) if b'\0' in chunk: return True return False def count_lines(filepath, language): total_lines = 0 code_lines = 0 comment_lines = 0 empty_lines = 0 with open(filepath, 'r', encoding='utf-8', errors='ignore') as f: for line in f: total_lines += 1 line = line.strip() if not line: empty_lines += 1 elif re.match(language['comment_regex'], line): comment_lines += 1 else: code_lines += 1 return (total_lines, code_lines, comment_lines, empty_lines) def get_language(filepath, languages): for language in languages.values(): for extension in language['extensions']: if filepath.endswith(extension): return language return None def get_filetypes(dirpath): filetypes = {} for root, dirs, files in os.walk(dirpath): for file in files: filepath = os.path.join(root, file) if not is_binary_file(filepath): ext = os.path.splitext(file)[1] if ext not in filetypes: filetypes[ext] = 0 filetypes[ext] += 1 return filetypes def main(): dirpath = "./repos" if not os.path.isdir(dirpath): print("Invalid directory path") return with open("languages.json") as f: languages = json.load(f) total_lines = 0 total_code_lines = 0 total_comment_lines = 0 total_empty_lines = 0 lang_lines = {} filetypes = get_filetypes(dirpath) new_dict = {} for key in languages: name = languages[key]["name"].upper() new_dict[name] = 0 for root, dirs, files in os.walk(dirpath): for file in files: filepath = os.path.join(root, file) if not is_binary_file(filepath): language = get_language(filepath, languages) if language: (total, code, comment, empty) = count_lines(filepath, language) total_lines += total total_code_lines += code total_comment_lines += comment total_empty_lines += empty lang_name = language["name"].upper() new_dict[f"{lang_name}"] += 1 if lang_name not in lang_lines: lang_lines[lang_name] = {'total': 0, 'code': 0, 'comment': 0, 'empty': 0} lang_lines[lang_name]['total'] += total lang_lines[lang_name]['code'] += code 
lang_lines[lang_name]['comment'] += comment lang_lines[lang_name]['empty'] += empty total_files = sum(new_dict.values()) #all_var="" #all_var+="Language Files Total Lines Code Lines Comment Lines Empty Lines"+"\n" #all_var+="-"*84+"\n" data = [] for lang, lines in lang_lines.items(): total = lines['total'] code = lines['code'] comment = lines['comment'] empty = lines['empty'] data.append({'Language': f'{lang}', 'Files': new_dict[f"{lang}"], 'Total Lines': total, 'Code Lines': code, 'Comment Lines': comment, 'Empty Lines': empty}) #all_var+="{:<12}{:<9}{:<17}{:<17}{:<19}{}".format(lang, new_dict[f"{lang}"], total, code, comment, empty)+"\n" #all_var+="-"*84+"\n" #all_var+="{:<12}{:<9}{:<17}{:<17}{:<19}{}".format("TOTAL", total_files, total_lines, total_code_lines, total_comment_lines, total_empty_lines) #data.append({'Language': 'TOTAL', 'Files': total_files, 'Total Lines': total_lines, 'Code Lines': total_code_lines, 'Comment Lines': total_comment_lines, 'Empty Lines': total_empty_lines}) return data def format_table(data): headers = ['Language', 'Files', 'Total Lines', 'Code Lines', 'Comment Lines', 'Empty Lines'] # Sort the data by Total Lines in descending order sorted_data = sorted(data, key=lambda x: x['Total Lines'], reverse=True) table = [] for d in sorted_data: row = [d['Language'], d['Files'], d['Total Lines'], d['Code Lines'], d['Comment Lines'], d['Empty Lines']] table.append(row) table.append(['TOTAL', sum(d['Files'] for d in data), sum(d['Total Lines'] for d in data), sum(d['Code Lines'] for d in data), sum(d['Comment Lines'] for d in data), sum(d['Empty Lines'] for d in data)]) return tabulate(table, headers, tablefmt='pipe') if __name__ == '__main__': if len(sys.argv) < 2: print(f"Usage: python {os.path.basename(__file__)} 'GITHUB_ACCESS_TOKEN' 'GITHUB_USERNAME'") exit() else: clone_repos(sys.argv[1],sys.argv[2]) all_var=format_table(main()) with open('README.md', 'w') as f: # Get current date and time in seconds since the epoch seconds_since_epoch = time.time() # Format the value as a date and time string date_time_string = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(seconds_since_epoch)) # Print the value print(date_time_string) f.write(all_var) f.write("\n\nLast Update: "+date_time_string) print(all_var) #}END.
TAFFAHACHRAF/TAFFAHACHRAF
main.py
main.py
py
7,201
python
en
code
3
github-code
6
73924482426
import re
from bs4 import BeautifulSoup

'''
Information to crawl:
1. basic info
2. author bio
3. summary
4. quotes from the original text
5. recommended ebooks
6. recommended books
7. comments
'''


class Parser(object):
    def __init__(self, soup):
        self.title = soup.find('span', property="v:itemreviewed")
        self.imgLink = soup.find('a', class_='nbg')
        self.score = soup.find('div', class_='rating_self clearfix')
        self.blockquote = soup.find('ul', class_='blockquote-list')
        self.comments = soup.find('div', class_='comment-list new_score show')
        self.moreInfo = soup.find('div', id="info")

        # Summary
        tmp = soup.find('div', class_="indent", id='link-report')
        if tmp:
            self.summary = tmp.find('span', class_='all hidden')
            if self.summary is None:
                self.summary = tmp.find('div', class_='intro')
        else:
            self.summary = None

        # Author bio
        tmp = soup.find('div', class_='related_info').find('div', class_="indent", id=False)
        if tmp:
            self.author = tmp.find('span', class_='all hidden')
            if self.author is None:
                self.author = tmp.find('div', class_='intro')
        else:
            self.author = None

        # Recommended ebooks
        tmp = soup.find('div', id='rec-ebook-section')
        if tmp:
            self.recom_ebook = tmp.find('div', class_='content clearfix')
        else:
            self.recom_ebook = None

        # Recommended books
        tmp = soup.find('div', id='db-rec-section')
        if tmp:
            self.recom_book = tmp.find('div', class_='content clearfix')
        else:
            self.recom_book = None

    # Top-level parse function
    def parse_all(self):
        # Dict that holds everything parsed from the page
        # (keys are kept in Chinese to match the original output schema)
        bookDict = dict()
        bookDict['基本信息'] = self.parse_info()
        bookDict['内容简介'] = self.parse_summary()
        bookDict['作者简介'] = self.parse_author()
        bookDict['原文摘录'] = self.parse_blockquote()
        bookDict['电子书推荐'] = self.parse_recom_ebook()
        bookDict['书籍推荐'] = self.parse_recom_book()
        bookDict['热评'] = self.parse_comment()
        return bookDict

    # Parse the basic info
    def parse_info(self):
        infoDict = dict()
        # 1. Title (every page should at least have one)
        infoDict['标题'] = self.title.text
        # 2. Cover image link
        if self.imgLink:
            infoDict['图片链接'] = self.imgLink.img['src']
        # 3. Rating: {'评分': ['<score>', '<number of raters>']}
        if self.score:
            score = self.score.text.split()
            score[1] = score[1][:-3]
            infoDict['评分'] = score
        # 4. Parse the remaining fields
        infoDict.update(self.parse_moreinfo())
        return infoDict

    # Parse the extra info block
    def parse_moreinfo(self):
        infoDict = dict()
        for item in re.split('<br>|<br/>', str(self.moreInfo)):  # one field per segment
            item = BeautifulSoup(item, "html.parser")
            item = re.sub('[ \n]', '', item.text)
            if not item:
                continue
            item = item.split(':')
            key = item[0]
            value = item[1].strip()
            infoDict[key] = value
        return infoDict

    # Get the summary
    def parse_summary(self):
        if not self.summary:
            return None
        summary = ''
        for par in self.summary.find_all('p'):
            par = par.text.replace('\u3000', '')
            summary = summary + par + '\par'
        return summary

    # Get the author bio
    def parse_author(self):
        if not self.author:
            return None
        author = ''
        for par in self.author.find_all('p'):
            par = par.text.replace('\u3000', '')
            author = author + par + '\par'
        return author

    # Get quotes from the original text
    def parse_blockquote(self):
        if not self.blockquote:
            return None
        blockquote = list()
        [s.extract() for s in self.blockquote('div')]
        for item in self.blockquote.find_all('figure'):
            # '查看原文' is the "view original" link text on the page
            blockquote.append(item.text.strip().split(' (查看原文)')[0])
        return blockquote

    # Related ebook recommendations
    def parse_recom_ebook(self):
        if not self.recom_ebook:
            return None
        recom_ebook = list()
        for item in self.recom_ebook.find_all('dl'):
            imgLink = item.img
            imgLink = imgLink['src']
            recom_ebook.append([item.text.split()[0], imgLink])
        return recom_ebook

    # Related book recommendations
    def parse_recom_book(self):
        if not self.recom_book:
            return None
        recom_book = list()
        for item in self.recom_book.find_all('dl'):
            if not item.text:
                continue
            imgLink = item.img
            imgLink = imgLink['src']
            recom_book.append([item.text.split()[0], imgLink])
        return recom_book

    # Comments
    def parse_comment(self):
        if not self.comments:
            return None
        comments = []
        for item in self.comments.find_all('span', class_='short'):
            comments.append(item.text)
        return comments

    # Public entry point
    def run(self):
        # print('start parsing')
        res = self.parse_all()
        return res
        # Convert the dict to JSON:
        # return json.dumps(res, ensure_ascii=False, indent=1)


# if __name__ == '__main__':
#     # Open a local file and parse it
#     inputPath = r'C:\Users\31363\Desktop\Workspace\lab_web\book_spider\doc\demo.html'
#     soup = BeautifulSoup(open(inputPath, encoding='utf8'), 'html.parser')
#     parser = Parser(soup)
#     parser.parse_all()
icecream-and-tea/labs_web
lab1/lab1_stage1/book_spider/src/html_parser.py
html_parser.py
py
5,863
python
en
code
2
github-code
6
37018468843
import numpy as np # randomly sampling 100 obsev from t-distribution N = 1000 df = N-1 X = np.random.standard_t(df, size = N) import matplotlib.pyplot as plt from scipy.stats import t x_values = np.arange(-5,5,0.1) y_values = t.pdf(x_values,df) # Sample Distribution count, bins, ignored = plt.hist(X, 20, density = True,color = 'purple',label = 'Sample Distribution') # Population Distribution plt.plot(x_values,y_values, color = 'y', linewidth = 2.5,label = 'Population Distribution') #adding title and y-label plt.title("Randomly sampled from standard Student t-distribution") plt.ylabel("Probability") plt.legend() plt.show()
TatevKaren/mathematics-statistics-for-data-science
Probability-Distribution-Functions/Student t distribution.py
Student t distribution.py
py
634
python
en
code
88
github-code
6
40261683440
import sys sys.path.append("..") import os import pandas import re import math import argparse from models.train_model import get_training_model_new from train.ds_iterator import DataIterator from train.ds_client_generator import DataGeneratorClient from keras.optimizers import Adam from keras.callbacks import LearningRateScheduler, ModelCheckpoint, CSVLogger, TensorBoard from keras.layers.convolutional import Conv2D from keras.applications.vgg19 import VGG19 def get_last_epoch(): data = pandas.read_csv(TRAINING_LOG) return max(data['epoch'].values) # euclidean loss as implemented in caffe https://github.com/BVLC/caffe/blob/master/src/caffe/layers/euclidean_loss_layer.cpp def eucl_loss(x, y): return K.sum(K.square(x - y)) / batch_size / 2 def step_decay(epoch): initial_lrate = base_lr steps = epoch * iterations_per_epoch lrate = initial_lrate * math.pow(gamma, math.floor(steps/stepsize)) return lrate if __name__ == '__main__': batch_size = 60 base_lr = 4e-5 # 2e-5 momentum = 0.9 weight_decay = 5e-4 lr_policy = "step" gamma = 0.333 stepsize = 68053#136106 #// after each stepsize iterations update learning rate: lr=lr*gamma max_iter = 20000 # 600000 #True = start data generator client, False = use augmented dataset file (deprecated) use_client_gen = True parser = argparse.ArgumentParser() parser.add_argument('--stages', type=int, default =6, help='number of stages') parser.add_argument('--port', type=int, default =5555, help= 'port where training data is running' ) parser.add_argument('--folder',type=str,default="weights_logs/5p_6/",help='"Where to save this training"' ) parser.add_argument('--gpu',default =1, help= 'what gpu to use, if "all" try to allocate on every gpu' ) parser.add_argument('--gpu_fraction', type=float, default =0.6, help= 'how much memory of the gpu to use' ) parser.add_argument('--np1', type=int, default =12, help= 'Number of pafs' ) parser.add_argument('--np2', type=int, default =6, help= 'number of heatmaps' ) args = parser.parse_args() folder = args.folder stages=int(args.stages) port=int(args.port) fraction = float(args.gpu_fraction) np1=int(args.np1)#12 #number of channels for pafs np2=int(args.np2)#6#number of channels for parts gpu = int(args.gpu) print(gpu) #stages=2#number of stages of network if gpu != 'all': print(gpu) os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152 os.environ["CUDA_VISIBLE_DEVICES"]="%d"%gpu import keras.backend as K import tensorflow as tf os.makedirs(folder,exist_ok=True) config = tf.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = fraction session = tf.Session(config=config) WEIGHTS_100_EPOCH = os.path.join(folder,"weights-2-{epoch:04d}_%d_%d_%d.h5"%(stages,np1,np2)) WEIGHTS_BEST = os.path.join(folder,"weights_%d_%d_%d.best.h5"%(stages,np1,np2)) WEIGHTS_COMPLETE = os.path.join(folder,"complete_model_%d_%d_%d.h5"%(stages,np1,np2)) TRAINING_LOG = os.path.join(folder,"training_new_%d_%d_%d.csv"%(stages,np1,np2)) LOGS_DIR = os.path.join(folder,"logs/") os.makedirs(LOGS_DIR,exist_ok=True) model = get_training_model_new(weight_decay,np1=np1,np2=np2,stages=stages) from_vgg = dict() from_vgg['conv1_1'] = 'block1_conv1' from_vgg['conv1_2'] = 'block1_conv2' from_vgg['conv2_1'] = 'block2_conv1' from_vgg['conv2_2'] = 'block2_conv2' from_vgg['conv3_1'] = 'block3_conv1' from_vgg['conv3_2'] = 'block3_conv2' from_vgg['conv3_3'] = 'block3_conv3' from_vgg['conv3_4'] = 'block3_conv4' from_vgg['conv4_1'] = 'block4_conv1' from_vgg['conv4_2'] = 'block4_conv2' # load previous weights or vgg19 if this is the first run if 
os.path.exists(WEIGHTS_BEST): print("Loading the best weights...") model.load_weights(WEIGHTS_BEST) last_epoch = get_last_epoch() + 1 else: print("Loading vgg19 weights...") vgg_model = VGG19(include_top=False, weights='imagenet') for layer in model.layers: if layer.name in from_vgg: vgg_layer_name = from_vgg[layer.name] layer.set_weights(vgg_model.get_layer(vgg_layer_name).get_weights()) print("Loaded VGG19 layer: " + vgg_layer_name) last_epoch = 0 # prepare generators if use_client_gen: train_client = DataGeneratorClient(port=port, host="localhost", hwm=160, batch_size=20,np1=np1,np2=np2,stages=stages) train_client.start() # check ds_generator_client.py train_di = train_client.gen() train_samples = 100 else: pass # Add our augmenter for check stuff # setup lr multipliers for conv layers lr_mult=dict() for layer in model.layers: if isinstance(layer, Conv2D): # stage = 1 if re.match("Mconv\d_stage1.*", layer.name): kernel_name = layer.weights[0].name bias_name = layer.weights[1].name lr_mult[kernel_name] = 1 lr_mult[bias_name] = 2 # stage > 1 elif re.match("Mconv\d_stage.*", layer.name): kernel_name = layer.weights[0].name bias_name = layer.weights[1].name lr_mult[kernel_name] = 4 lr_mult[bias_name] = 8 # vgg else: kernel_name = layer.weights[0].name bias_name = layer.weights[1].name lr_mult[kernel_name] = 1 lr_mult[bias_name] = 2 # configure loss functions losses = {} for i in range(1,stages+1): losses["weight_stage"+str(i)+"_L1"] = eucl_loss losses["weight_stage"+str(i)+"_L2"] = eucl_loss print(losses.keys()) # learning rate schedule - equivalent of caffe lr_policy = "step" iterations_per_epoch = train_samples // batch_size # configure callbacks lrate = LearningRateScheduler(step_decay) checkpoint = ModelCheckpoint(WEIGHTS_BEST, monitor='loss', verbose=0, save_best_only=False, save_weights_only=True, mode='min', period=1) checkpoint2 = ModelCheckpoint(WEIGHTS_100_EPOCH, monitor='loss', verbose=0, save_best_only=False, save_weights_only=True, mode='min', period=100) checkpoint3 = ModelCheckpoint(WEIGHTS_COMPLETE, monitor='loss', verbose=0, save_best_only=True,save_weights_only=False, mode='min', period=100) csv_logger = CSVLogger(TRAINING_LOG, append=True) tb = TensorBoard(log_dir=LOGS_DIR, histogram_freq=0, write_graph=True, write_images=False) callbacks_list = [lrate, checkpoint, csv_logger, tb,checkpoint2,checkpoint3] # sgd optimizer with lr multipliers #multisgd = MultiSGD(lr=base_lr, momentum=momentum, decay=0.0, nesterov=False, lr_mult=lr_mult) multisgd = Adam(lr=base_lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False) # start training model.compile(loss=losses, optimizer=multisgd, metrics=["accuracy"]) model.fit_generator(train_di, steps_per_epoch=train_samples // batch_size, epochs=max_iter, callbacks=callbacks_list, #validation_data=val_di, #validation_steps=val_samples // batch_size, use_multiprocessing=False, initial_epoch=last_epoch )
piperod/beepose
beepose/train/train_stages.py
train_stages.py
py
7,547
python
en
code
8
github-code
6
37430539138
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: shopnum1 GuidBuyList.aspx SQL injection
referer: http://www.wooyun.org/bugs/wooyun-2015-0118447
author: Lucifer
description: In GuidBuyList.aspx, the guid parameter is vulnerable to SQL injection.
'''
import sys
import requests

class shopnum_GuidBuyList_sqli_BaseVerify:
    def __init__(self, url):
        self.url = url

    def run(self):
        headers = {
            "User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
        }
        payload = "/GuidBuyList.aspx?guid=97dcbadc-9b4f-4ff5-9ffb-17e46e10d66d%27AnD(ChAr(66)%2BChAr(66)%2BChAr(66)%2B@@VeRsiOn)%3E0--"
        vulnurl = self.url + payload
        try:
            req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
            if r"BBBMicrosoft" in req.text:
                return "[+]shopnum1 GuidBuyList.aspx SQL injection found...(high risk)\tpayload: " + vulnurl
        except:
            return "[-]connect timeout"

if __name__ == "__main__":
    testVuln = shopnum_GuidBuyList_sqli_BaseVerify(sys.argv[1])
    testVuln.run()
iceyhexman/onlinetools
scanner/plugins/cms/shopnum/shopnum_GuidBuyList_sqli.py
shopnum_GuidBuyList_sqli.py
py
1,147
python
en
code
1,626
github-code
6
17547705346
from keras.models import Model, load_model, save_model
from keras.layers import Input, Dense, Conv2D, Flatten, BatchNormalization, AveragePooling2D
from keras.activations import relu, softmax
from keras import backend as K
from keras.optimizers import Adam, RMSprop, SGD
import keras.initializers as initializers


class Actor():
    def __init__(self, state_size, action_size, hyper_param=None, seed=714):
        # Use the defaults unless the caller supplies its own hyper-parameters
        # (the argument was previously overwritten unconditionally).
        defaults = {
            'lr': 1e-7,
        }
        if hyper_param:
            defaults.update(hyper_param)
        hyper_param = defaults

        self.seed = seed
        self.state_size = state_size
        self.action_size = action_size

        state = Input(shape=self.state_size)
        advantage = Input(shape=(1, ))
        old_prediction = Input(shape=(self.action_size, ))

        x = Conv2D(filters=20, kernel_size=(2, 2), strides=1,
                   activation=relu, padding='same')(state)
        x = AveragePooling2D()(x)
        x = Conv2D(filters=20, kernel_size=(4, 4), strides=1,
                   activation=relu, padding='same')(x)
        x = AveragePooling2D()(x)
        x = Flatten()(x)
        x = Dense(units=512,
                  activation=relu,
                  kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=.00002, seed=self.seed),
                  bias_initializer=initializers.Constant(0.1)
                  )(x)
        actions_prob = Dense(units=action_size, activation=softmax, name='output')(x)

        model = Model(inputs=[state, advantage, old_prediction], outputs=actions_prob)
        model.compile(optimizer=
                      # SGD(lr=hyper_param['lr']),
                      # RMSprop(lr=hyper_param['lr']),
                      Adam(lr=hyper_param['lr']),
                      loss=[self.proximal_policy_optimization_loss(
                          advantage=advantage,
                          old_prediction=old_prediction)])
        model.summary()
        self.model = model

    def proximal_policy_optimization_loss(self, advantage, old_prediction):
        LOSS_CLIPPING = 0.2
        ENTROPY_LOSS = 0.007

        def loss(y_true, y_pred):
            prob = y_true * y_pred
            old_prob = y_true * old_prediction
            r = prob / (old_prob + 1e-10)
            # The entropy term carries an inner minus sign so that minimising
            # the loss maximises entropy (i.e. encourages exploration).
            return -K.mean(
                K.minimum(
                    r * advantage,
                    K.clip(
                        r,
                        min_value=1 - LOSS_CLIPPING,
                        max_value=1 + LOSS_CLIPPING
                    ) * advantage
                ) + ENTROPY_LOSS * -(prob * K.log(prob + 1e-10))
            )
        return loss

    def save_model(self, name):
        self.model.save(name)

    def load_model(self, name):
        advantage = Input(shape=(1, ))
        old_prediction = Input(shape=(self.action_size, ))
        model = load_model(name, custom_objects={
            'loss': self.proximal_policy_optimization_loss(
                advantage=advantage,
                old_prediction=old_prediction)})
        self.model = model
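
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the 8x8x1 state grid and the
# 4-action space are made-up values, and the dummy advantage / old-prediction
# arrays are needed only because they are wired into the PPO loss inputs.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    actor = Actor(state_size=(8, 8, 1), action_size=4)
    state = np.zeros((1, 8, 8, 1), dtype=np.float32)
    dummy_advantage = np.zeros((1, 1), dtype=np.float32)
    dummy_old_pred = np.full((1, 4), 0.25, dtype=np.float32)
    probs = actor.model.predict([state, dummy_advantage, dummy_old_pred])
    print(probs)  # action probabilities with shape (1, 4), summing to ~1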
rlalpha/rl-trial
ppo/actor.py
actor.py
py
3,131
python
en
code
0
github-code
6
7161693364
""" Given an unsorted integer array nums, return the smallest missing positive integer. You must implement an algorithm that runs in O(n) time and uses constant extra space. Example 1: Input: nums = [1,2,0] Output: 3 Example 2: Input: nums = [3,4,-1,1] Output: 2 Example 3: Input: nums = [7,8,9,11,12] Output: 1 Constraints: 1 <= nums.length <= 5 * 105 -231 <= nums[i] <= 231 - 1 """ from typing import List class Solution: def remove_negative_nums(self,A): temp = [] for num in A: if num > 0: temp.append(num) return temp def firstMissingPositive(self, nums: List[int]) -> int: n = len(nums) num_of_negative_int = 0 for num in nums: if num<=0: num_of_negative_int += 1 if n == num_of_negative_int: return 1 else: temp =[] temp = self.remove_negative_nums(nums) min_num = min(temp) if min_num != 1: return 1 else: # temp = sorted(temp) for i in range(1,len(temp)): if min_num+1 in temp: min_num += 1 else: return min_num + 1 return min_num + 1
CompetitiveCodingLeetcode/LeetcodeEasy
Hard/FirstMissingPositive_Q41.py
FirstMissingPositive_Q41.py
py
1,307
python
en
code
0
github-code
6
43627672364
from typing import List class Solution: # Two pointers def maximumScore(self, nums: List[int], k: int) -> int: i, j = k, k n = len(nums) res, minVal = nums[k], nums[k] while 0 < i or j < n-1: if i == 0: j += 1 elif j == n-1: i -= 1 elif nums[i-1] < nums[j+1]: j += 1 else: i -= 1 minVal = min(minVal, nums[i], nums[j]) res = max(res, minVal * (j-i+1)) return res # O(N*N) is not good enough :( def maximumScore_own_TLE(self, nums: List[int], k: int) -> int: res = float('-inf') n = len(nums) minVals = [[float('inf')]*n for _ in range(n)] for i in range(n): minVal = float('inf') for j in range(i, -1, -1): minVal = min(minVal, nums[j]) minVals[j][i] = minVal for i in range(k, -1, -1): for j in range(k, len(nums)): res = max(res, (minVals[i][j]) * (j - i + 1)) return res def test(self): test_cases = [ [[1,4,3,7,4,5], 3], [[5,5,4,5,4,1,1,1], 0], ] for nums, k in test_cases: res = self.maximumScore(nums, k) print('res: %s' % res) print('-=' * 30 + '-') if __name__ == '__main__': Solution().test()
MichaelTQ/LeetcodePythonProject
solutions/leetcode_1751_1800/LeetCode1793_MaximumScoreOfAGoodSubarray.py
LeetCode1793_MaximumScoreOfAGoodSubarray.py
py
1,424
python
en
code
0
github-code
6
20774839234
import csv import math import time import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import matplotlib.ticker as mticker initial = time.time() B = 5e-4 RBW = 300 data = {} for k in range(11): for i in range(3): with open('C:\\Users\\uqfgotar\\Documents\\Magnetometry\\Sensitivity_calculations\\Fernando\\254_4\\14stJun' + '\\Spectrum_analyzer\\SSA_' + str("{:02d}".format(k+1)) + '_' + str(i+1) + '.csv') as a: df = csv.reader(a, delimiter=',') df_temp = [] for row in df: df_temp.append(row) df = df_temp[31:] for j in range(len(df)): df[j] = [np.float(df[j][0]), np.float(df[j][1])] data['SSA_' + str(k + 1) + '_exp_' + str(i + 1)] = np.reshape(np.array(df), (-1, 2)) data['SSA_' + str(k+1) + '_exp_' + str(i+1)] = np.array(df) # data['SSA_2_exp_1'] = data['SSA_2_exp_1'][:, 0:2] # data['SSA_8_exp_1'] = data['SSA_8_exp_1'][:, 0:2] Bmin_ref = np.zeros(11) SN_min = np.zeros(11) for k in range(11): SNR = [] mean = np.mean(data['SSA_' + str(k + 1) + '_exp_3'][370:440, 1]) for row in range(751): c = float(data['SSA_' + str(k + 1) + '_exp_2'][row, 1]) - mean SNR.append(c) data['SNR' + str(k + 1)] = np.array(SNR) SN_min = math.pow(10,(data['SNR' + str(k + 1)][370:440].max())/10) Bmin_ref[k] = np.divide(B,(np.sqrt(SN_min*RBW))) for k in range(13): with open('C:\\Users\\uqfgotar\\Documents\\Magnetometry\\Sensitivity_calculations\\Fernando\\254_4\\14stJun' + '\\Network_analyzer\\TRACE' + str("{:02d}".format(k+1)) + '.csv') as a: df = csv.reader(a, delimiter=',') df_temp = [] for row in df: df_temp.append(row) df = df_temp[3:] for j in range(len(df)): df[j] = [np.float(df[j][0]), np.float(df[j][1])] data['TRACE' + str(k + 1)] = np.reshape(np.array(df), (-1, 2)) S21_Snn_ref_ratio = np.zeros(11) Bmin_min = np.zeros(11) for k in range(11): Bmin = [] S21_Snn_ref_ratio[k] = data['TRACE' + str(k + 1)][8, 1]/data['TRACE13'][8, 1] for row in range(751): c = np.multiply(np.sqrt(np.multiply(np.divide(data['TRACE13'][row, 1], data['TRACE' + str(k + 1)][row, 1]), S21_Snn_ref_ratio[k])), Bmin_ref[k]) Bmin.append(c) for j in range(len(Bmin)): Bmin[j] = np.float(Bmin[j]) data['Bmin' + str(k)] = np.asarray(Bmin) data['Bmin_omega' + str(k)] = np.multiply(np.divide(data['Bmin' + str(k)], 1e-12), Bmin_ref[k]) print(data['Bmin_omega' + str(k)].shape) Bmin_min[k] = np.divide(data['Bmin' + str(k)].min(), 1e-6) height = [30, 60, 90, 150, 210, 270, 470, 670, 1000, 2000, 2400] height = np.array(height) axes = plt.gca() xmin = data['TRACE1'][:, 0].min() xmax = data['TRACE1'][:, 0].max() plt.figure(1) for k in range(11): plt.plot(data['TRACE' + str(k + 1)][:,0], data['Bmin_omega' + str(k)], label='$\Delta$z = ' + str(height[k])) plt.xlabel('Frequency (MHz)') plt.ylabel('Sensitivity ($\mu$T/$\sqrt{Hz}$)') axes.set_xlim([(xmin-50000), 2000000]) plt.figure(2) plt.plot(height, Bmin_min, 'ro') plt.xscale('log') plt.xlabel(r'$\Delta$z ($\mu$m)') plt.ylabel('Best sensitivity ($\mu$T/$\sqrt{Hz}$)') final = time.time() print('\n' + str(final - initial) + ' seconds') plt.show()
gotamyers/Flux_conc_height
Read_multiple_data_files.py
Read_multiple_data_files.py
py
3,404
python
en
code
0
github-code
6
34900553836
#!/usr/bin/env python

import argparse
import collections
import operator
import os
import re

UA_RE = re.compile(r'"(Mozilla[^"]*?)"')


def extract_log(file_obj, counts):
    for line in file_obj:
        m = UA_RE.search(line)
        if not m:
            continue
        counts[m.groups()[0]] += 1


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--directory', default='.')
    parser.add_argument('-c', '--count', type=int, default=10)
    args = parser.parse_args()

    counts = collections.defaultdict(int)
    for fname in os.listdir(args.directory):
        if fname.startswith('access.log'):
            with open(os.path.join(args.directory, fname)) as file_obj:
                extract_log(file_obj, counts)

    agents = list(
        sorted(counts.items(), key=operator.itemgetter(1), reverse=True))
    for agent, count in agents[:args.count]:
        print('{:<7d} {}'.format(count, agent))


if __name__ == '__main__':
    main()
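
# Example invocation (the path is illustrative): summarise the five most
# common user agents across rotated nginx logs in /var/log/nginx:
#     python extract.py --directory /var/log/nginx --count 5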
eklitzke/nginx-ua-extract
extract.py
extract.py
py
946
python
en
code
0
github-code
6
15273361459
from . import TestCase from flask import url_for from .. import db from ...models import User class UsersTest(TestCase): render_templates = False def test_list_users(self): self._create_user() response = self.as_user('get', url_for("users")) self.assertEquals(1, len(response.json['_embedded']['users'])) def test_list_users_paginate(self): for i in range(1, 30): self._create_user(i) response = self.as_user('get', url_for("users")) self.assertEquals(25, len(response.json['_embedded']['users'])) next_link = response.json['_links']['next']['href'] self.assertEquals("/users?page=2", next_link) response = self.as_user('get', next_link) self.assertFalse('next' in response.json['_links']) def test_view_user(self): self._create_user() response = self.as_user('get', url_for("user", id=1)) self.assertEquals("[email protected]", response.json['email']) def test_add_user(self): data = {'email': '[email protected]'} self.as_user('post', url_for("users"), data=data) u = User.query.get(1) self.assertEquals('[email protected]', u.email) def test_add_user_invalid(self): data = {} response = self.as_user('post', url_for("users"), data=data) self.assert400(response) message = "Error in the email field - This field is required." self.assertEquals(message, response.json['message']) def test_edit_user(self): u = self._create_user() data = {'email': '[email protected]', 'first_name': 'Fist Name'} self.as_user('put', url_for("user", id=1), data=data) self.assertEquals('Fist Name', u.first_name) def test_delete_user(self): u = self._create_user() self.as_user('delete', url_for("user", id=1)) self.assertEqual(False, u.active) def _create_user(self, id=1): u = User() u.id = id u.email = "user%[email protected]" % id u.password = "Password" db.session.add(u) return u
juokaz/flask-skeleton
website/api/tests/users_test.py
users_test.py
py
2,120
python
en
code
0
github-code
6
72132172028
import datetime import re import subprocess import sys from typing import Optional def run(argv: list[str]) -> subprocess.CompletedProcess: return subprocess.run( argv, capture_output=True, encoding='utf-8' ) def error(str: str) -> None: sys.stderr.write("%s\n" % str) def get_merge_commits(base: Optional[str], since: Optional[str]) -> list[str]: argv = [ "git", "log", "--pretty=tformat:%h,%p" ] if base: argv.append("%s..HEAD" % base) if since: argv.append('--since=%s' % since) completed = run(argv) re_merge_commit = r'^([0-9a-fA-f]+),([0-9a-fA-F]+) ([0-9a-fA-F]+)$' output = completed.stdout lines = output.splitlines() merge_commits = [] for line in lines: match = re.match(re_merge_commit, line) if match: merge_commits.append(match.group(1)) return merge_commits def find_matches(merge_commits: list[str], patterns: list[str]) -> list[str]: matches = [] for commit_hash in merge_commits: completed = run( [ "git", "show", "--pretty=tformat:%s", commit_hash ] ) first_line = completed.stdout.splitlines()[0] found_match = False for pattern in patterns: match = re.search(pattern, first_line) if match: matches.append(match.group(1)) found_match = True continue if not found_match: error("no match: »%s«" % first_line) return matches def partition_args(raw: list[str]) -> tuple[list[str], dict[str, Optional[str]]]: args = [] flags = {} for arg in raw: if len(arg) > 0 and arg[0] == '-': key, *value = arg.split('=', 1) flags[key] = value[0] if len(value) else None else: args.append(arg) return (args, flags) def parse_relative_date(raw: str) -> Optional[datetime.datetime]: match = re.match(r'([0-9]+)[ ]*([a-z]+)', raw) if not match: return None n = int(match.group(1)) if match.group(2) == 'days': return datetime.datetime.now() - datetime.timedelta(days=n) else: return None def main() -> None: (args, flags) = partition_args(sys.argv[1:]) commit_hash = None patterns = [] since = None if '--today' in flags: today = datetime.date.today() since = '%s 00:00:00' % today.isoformat() patterns = args elif '--since' in flags: raw_since = flags['--since'] if not raw_since: raise Exception dt = parse_relative_date(raw_since) if not dt: raise Exception since = dt.isoformat() patterns = args elif len(args): commit_hash, *patterns = args merge_commits = get_merge_commits(commit_hash, since) print("Number of merge commits: %d" % len(merge_commits)) print() print("Merge commits:") if not merge_commits: print("(none)") for commit in merge_commits: print("- %s" % commit) print() if len(patterns) > 0: print("Matches:") matches = find_matches(merge_commits, patterns) if not matches: print("(none)") for title in matches: print("- %s" % title) if __name__ == "__main__": main()
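
# Example invocations (the ref and patterns are illustrative):
#     python merge_commits.py v1.0 'Merge pull request #([0-9]+)'
#     python merge_commits.py --today 'Merge pull request #([0-9]+)'
#     python merge_commits.py --since='7 days' '([A-Z]+-[0-9]+)'
# Each pattern must capture the fragment to report in group 1.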
djfo/dev-tools
merge_commits.py
merge_commits.py
py
3,438
python
en
code
1
github-code
6
19733029074
# -*- coding: utf-8 -*- import logging import requests from bs4 import BeautifulSoup logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger('wb') class Client: def __init__(self): self.session = requests.Session() self.session.headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36', 'Accept-Language': 'ru', } def load_page(self): url = 'https://www.wildberries.ru/catalog/aksessuary/aksessuary-dlya-volos' res = self.session.get(url=url) res.raise_for_status() return res.text def parse_page(self, text: str): soup = BeautifulSoup(text, 'lxml') container = soup.select('div.dtList.i-dtList.j-card-item') for block in container: self.parse_block(block=block) def parse_block(self, block): logger.info(block) logger.info('=' * 100) def run(self): text = self.load_page() self.parse_page(text=text) if __name__ == "__main__": parser = Client() parser.run()
KogameDev/WildberriesParser
main.py
main.py
py
1,056
python
en
code
0
github-code
6
44555345770
from py532lib.mifare import *
import time
import binascii
import hashlib

Mifare().SAMconfigure()
Mifare().set_max_retries(MIFARE_SAFE_RETRIES)

hs = hashlib.md5(b'ffffffffffffff')  # placeholder; remove later so the plaintext tag is never exposed
hs_md5 = hs.hexdigest()
i = 0
while True:
    ID = binascii.hexlify(Mifare().scan_field())
    hs_ori = hashlib.md5(ID)  # hash object of the ID read from the tag
    hs_ori_md5 = hs_ori.hexdigest()  # hex digest
    if hs_ori_md5 == hs_md5:  # the digests match
        # print(i, 'ID:', hs_ori_md5)
        print('open')  # access granted
    else:
        print('No!')  # access denied
    time.sleep(1)
    i = i + 1
chyijiunn/NFC
09_hash_Tag.py
09_hash_Tag.py
py
660
python
en
code
0
github-code
6
15293278222
import numpy as np
import pandas as pd
from flask import Flask, render_template, request

app = Flask(__name__)

df = pd.read_csv("amazon_prime.csv")
df = df.fillna("NaN")
df["release_year"] = [str(x) for x in df['release_year']]


def get_features(feats):
    input_columns, inputs = feats
    # Keep only the fields the user actually filled in.
    pairs = [(col, val.lower()) for col, val in zip(input_columns, inputs) if val != ""]
    results = []
    for sample in df.iloc:
        if len(results) == 10:
            break
        for col, input_ in pairs:
            if input_ not in sample[col].lower().split(", "):
                break
        else:
            results.append(sample["title"])
    return results


@app.route("/")
def home():
    return render_template("home.html")


@app.route("/get_data", methods=["POST"])
def get_data():
    website = "home.html"
    message = str(request.get_data())[2:-1].split("&")
    category = [x.split("=")[0] for x in message]
    value = [x.split("=")[1] for x in message]

    features = {"genre": "", "rating": "", "release_year": "", "duration": "",
                "actor": "", "director": "", "country": ""}
    for col in category:
        idx = category.index(col)
        features[col] = value[idx]

    message = list(features.values())
    if message[1].split("&")[0] == "age_rating":
        website = "secret_home.html"
    message[4] = message[4].replace("+", " ")
    message[5] = message[5].replace("+", " ")

    if message == ['', '', '', '', '', '', '']:
        return render_template(website, result1="Please enter a value in any of the text boxes")

    if message[3] != "":
        if not message[3].isnumeric():
            return render_template(website, result1="Please enter the duration as a number")
        # Normalise before matching so "70" can match the "70 min" strings in the CSV.
        message[3] = message[3] + " min"

    if message[2] != "" and not message[2].isnumeric():
        return render_template(website, result1="Please enter the release year as a number")

    feature_names = ["listed_in", "rating", "release_year", "duration", "cast", "director", "country"]
    result = get_features([feature_names, message])

    input_features = "Results for " + ", ".join([x for x in message if x != ""])
    if len(result) == 0:
        return render_template(website, result1="Your input did not match any movie or TV show in the database")

    # Fill result1..result10 for the template in one shot instead of a long if/elif chain.
    results_kwargs = {f"result{i + 1}": title for i, title in enumerate(result[:10])}
    return render_template(website, input_features=input_features, **results_kwargs)


if __name__ == '__main__':
    app.run(debug=True)
daBawse167/amazon-prime
app.py
app.py
py
4,782
python
en
code
0
github-code
6
37570271122
import re def fonct(text): x = re.compile(r'[A-Za-z0-9]{8,}') mo = x.search(text) if mo is not None: x = re.compile(r'\d+') y = re.compile(r'[a-z]+') z = re.compile(r'[A-Z]+') mo1 = x.search(text) mo2 = y.search(text) mo3 = z.search(text) if mo1 is None or mo2 is None or mo3 is None: print("FALSE 2") else: print("CORRECT") else: print("FALSE 1") if __name__ == '__main__': fonct("Che145dyfefe")
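
# Behaviour on a few sample inputs:
#     fonct("Che145dyfefe")  -> prints "CORRECT"  (8+ alphanumerics, digit, lower, upper)
#     fonct("short1A")       -> prints "FALSE 1"  (no run of 8+ alphanumerics)
#     fonct("alllowercase1") -> prints "FALSE 2"  (no uppercase letter)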
bishkou/Automate-the-boring-stuff-with-python
Regex/PhoneEmailRegEx.py
PhoneEmailRegEx.py
py
520
python
en
code
1
github-code
6
37785863928
#!/usr/bin/env python3 # Modules libraries from PyInquirer import Separator from PyInquirer.prompts import list as PyInquirer_prompts_list from PyInquirer.prompts.common import if_mousedown from PyInquirer.prompts.list import basestring from prompt_toolkit.layout.controls import TokenListControl from prompt_toolkit.token import Token # pylint: skip-file # Override with https://github.com/CITGuru/PyInquirer/pull/88 class InquirerControl(TokenListControl): def __init__(self, choices, **kwargs): self.selected_option_index = 0 self.answered = False self.choices = choices self._init_choices(choices) super(InquirerControl, self).__init__(self._get_choice_tokens, **kwargs) def _init_choices(self, choices, default=None): # helper to convert from question format to internal format self.choices = [] # list (name, value, disabled) searching_first_choice = True for i, c in enumerate(choices): if isinstance(c, Separator): self.choices.append((c, None, None)) else: if isinstance(c, basestring): self.choices.append((c, c, None)) else: name = c.get('name') value = c.get('value', name) disabled = c.get('disabled', None) self.choices.append((name, value, disabled)) if searching_first_choice: self.selected_option_index = i # found the first choice searching_first_choice = False @property def choice_count(self): return len(self.choices) def _get_choice_tokens(self, cli): tokens = [] T = Token def append(index, choice): selected = (index == self.selected_option_index) @if_mousedown def select_item(cli, mouse_event): # pragma: no cover # bind option with this index to mouse event self.selected_option_index = index self.answered = True cli.set_return_value(None) if isinstance(choice[0], Separator): tokens.append((T.Separator, ' %s\n' % choice[0])) else: tokens.append( (T.Pointer if selected else T, ' \u276f ' if selected else ' ')) if selected: tokens.append((Token.SetCursorPosition, '')) if choice[2]: # disabled tokens.append((T.Selected if selected else T, '- %s (%s)' % (choice[0], choice[2]))) else: try: tokens.append( (T.Selected if selected else T, str(choice[0]), select_item)) except: # pragma: no cover tokens.append( (T.Selected if selected else T, choice[0], select_item)) tokens.append((T, '\n')) # prepare the select choices for i, choice in enumerate(self.choices): append(i, choice) tokens.pop() # Remove last newline. return tokens def get_selection(self): return self.choices[self.selected_option_index] # Patcher class class Patcher: # Constructor def __init__(self): # Apply library patches PyInquirer_prompts_list.InquirerControl = InquirerControl
starr-dusT/gitlab-ci
gitlabci_local/package/patcher.py
patcher.py
py
3,488
python
en
code
0
github-code
6
10220508565
from time import time from nazurin.database import Database from nazurin.models import Illust from .api import Zerochan from .config import COLLECTION patterns = [ # https://www.zerochan.net/123456 r"zerochan\.net/(\d+)", # https://s1.zerochan.net/Abcdef.600.123456.jpg # https://static.zerochan.net/Abcdef.full.123456.jpg r"zerochan\.net/\S+\.(\d+)\.\w+$", ] async def handle(match) -> Illust: post_id = match.group(1) api = Zerochan() db = Database().driver() collection = db.collection(COLLECTION) illust = await api.view(post_id) illust.metadata["collected_at"] = time() await collection.insert(int(post_id), illust.metadata) return illust
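# Editor's note (hedged): a quick, self-contained illustration of how the
# `patterns` list above is meant to be consumed -- each regex is tried against
# a URL and the first capture group is the numeric post id. The dispatch in
# nazurin itself lives elsewhere; this demo only runs when the file is
# executed directly.
if __name__ == "__main__":
    import re
    for url in ("https://www.zerochan.net/123456",
                "https://static.zerochan.net/Abcdef.full.123456.jpg"):
        for p in patterns:
            m = re.search(p, url)
            if m:
                print(url, "->", m.group(1))  # both print "... -> 123456"
                break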
y-young/nazurin
nazurin/sites/zerochan/interface.py
interface.py
py
702
python
en
code
239
github-code
6
1822810123
#Symmetric Binary Tree
#option-1 - using queue

class Node():
    def __init__(self,data):
        self.data = data
        self.lchild = None
        self.rchild = None

def insertlevelordertree(arr,root,i,n):
    '''Build a binary tree from a level-order list; None entries mean "no node here".'''
    if i < n and arr[i] is not None:
        root = Node(arr[i])
        root.lchild = insertlevelordertree(arr,root.lchild,2*i+1,n)
        root.rchild = insertlevelordertree(arr,root.rchild,2*i+2,n)
    return root

def printTree(printType):
    if not root:
        print("Empty Tree")
    elif printType == "PreOrder":
        printPreOrder(root,level=0)
    elif printType == "LevelOrder":
        printLevelOrder(root,level=0)

def printPreOrder(cur_node,level):
    print(cur_node.data)
    if cur_node.lchild:
        printPreOrder(cur_node.lchild,level+1)
    if cur_node.rchild:
        printPreOrder(cur_node.rchild,level+1)

def printLevelOrder(cur_node,level):
    '''Collect the node values level by level using a queue of the current level.'''
    mq = [cur_node]
    l = []
    while mq:
        l.append([node.data for node in mq])
        nxt = []
        for node in mq:
            if node.lchild: nxt.append(node.lchild)
            if node.rchild: nxt.append(node.rchild)
        mq = nxt
    print("Level Order Tree:",l)

def unival(cur_node):
    '''Count unival subtrees (every node in the subtree has the same value).
    Rewritten: the original level-order version indexed its child queue
    incorrectly and returned None, so this uses the standard recursive
    formulation instead.'''
    count = 0
    def helper(node):
        nonlocal count
        if node is None:
            return True
        left_ok = helper(node.lchild)
        right_ok = helper(node.rchild)
        if (left_ok and right_ok
                and (node.lchild is None or node.lchild.data == node.data)
                and (node.rchild is None or node.rchild.data == node.data)):
            count += 1
            return True
        return False
    helper(cur_node)
    return count

def checkSymmetry(cur_left,cur_right):
    '''A tree is symmetric when its left and right halves mirror each other.
    (Now returns False explicitly on a data mismatch instead of falling
    through and returning None.)'''
    if not cur_left and not cur_right:
        return True
    if cur_left and cur_right and cur_left.data == cur_right.data:
        return (checkSymmetry(cur_left.lchild, cur_right.rchild)
                and checkSymmetry(cur_left.rchild, cur_right.lchild))
    return False

def pathSum(cur_node,targetSum):
    '''True if some root-to-leaf path adds up to targetSum.'''
    if not cur_node:
        return False
    if not cur_node.lchild and not cur_node.rchild and cur_node.data == targetSum:
        return True
    targetSum -= cur_node.data
    return pathSum(cur_node.lchild,targetSum) or pathSum(cur_node.rchild,targetSum)

arr = [5,1,5,5,5,None,5]
n = len(arr)
root = insertlevelordertree(arr,None,0,n)
printTree("LevelOrder")
print("Is the tree symmetric?: ",checkSymmetry(root,root))
print("Count of Unival Subtree:",unival(root))

## Other test cases kept from the original file:
##   [1,2,2,3,4,4,3]                               -> symmetric
##   [1,2,2,None,3,None,3]                         -> not symmetric
##   [5,4,8,11,None,13,4,7,2,None,None,None,1] with pathSum(root,22) -> True
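# Editor's note (hedged): a tiny extra check of checkSymmetry against the two
# level-order arrays kept in the condensed comments above -- [1,2,2,3,4,4,3]
# mirrors around its root, [1,2,2,None,3,None,3] does not.
sym = insertlevelordertree([1,2,2,3,4,4,3], None, 0, 7)
print("Symmetric case: ", checkSymmetry(sym, sym))    # expected: True
asym = insertlevelordertree([1,2,2,None,3,None,3], None, 0, 7)
print("Asymmetric case:", checkSymmetry(asym, asym))  # expected: False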
sushasru/LeetCodeCrunch
SymmetricBinaryTree.py
SymmetricBinaryTree.py
py
4,756
python
en
code
0
github-code
6
70357157629
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt

MapL = 15       # Chessboard size
WinN = 5        # "Five"-in-a-row
step = 0        # Steps taken
steps = []      # Coordinates of each step
end_flag = 0    # Game end flag
board = np.zeros((MapL,MapL),dtype=np.int64)  # chessboard
mode = 4        # modes: 0:player-player, 1:PC-player, 2:player-PC, 3:PC-PC

# parameters          1  2   3   4   5   6     7   8     9  10  11    12    13    14    15    16    17    18    19     20
coeffs = [np.array([[-12, 0,-35,-15,-34,-25,-1000,-45,-1000,-30,-30,-1000,-9500,-9500,-9500,-9500,-9500,-9500,-9500,-90000],
                    [ 10, 3, 30, 15, 29, 12,  190, 55,  180, 20, 20, 4000,  140,  135,  130,  130,  200,  135,  135, 90000]]),
          np.array([[-15, 0,-35,-15,-34,-25,-1000,-40,-1000,-30,-30,-1000,-9500,-9500,-9500,-9500,-9500,-9500,-9500,-30000],
                    [ 10,10, 30, 15, 29, 12,  195, 50,  180, 20, 20, 4000,  140,  135,  130,  130,  200,  135,  135, 40000]])]
#numsall = np.zeros((2,len(coeffs[0][0])))

def judge(l,c,winn):
    '''judge if a player wins by taking a move (l,c)'''
    # line #
    count = 0; i = 0
    while i < MapL-1 and count < WinN-1:
        if board[l][i] and board[l][i] == board[l][i+1]: count += 1
        else: count = 0
        i += 1
    if count == WinN-1: return 1
    # column #
    count = 0; i = 0
    while i < MapL-1 and count < WinN-1:
        if board[i][c] and board[i][c] == board[i+1][c]: count += 1
        else: count = 0
        i += 1
    if count == WinN-1: return 1
    # Principal diagonal #
    count = 0; i = 0
    l_ = l - min(l,c); c_ = c - min(l,c)
    while i+l_<MapL-1 and i+c_<MapL-1 and count<WinN-1:
        if board[i+l_][i+c_] and board[i+l_+1][i+c_+1] == board[i+l_][i+c_]: count += 1
        else: count = 0
        i += 1
    if count == WinN-1: return 1
    # Subdiagonal #
    count = 0; i = 0
    while c > 0 and l < MapL-1:
        l += 1; c -= 1
    while l-i>0 and i+c<MapL and count<WinN-1:
        if board[-i+l][i+c] and board[-i+l-1][i+c+1] == board[-i+l][i+c]: count += 1
        else: count = 0
        i += 1
    if count == WinN-1: return 1
    return 0

def auto(player=2,coeff=0):
    '''computer's move'''
    max_score = -np.inf
    ymax = 1; xmax = 1
    # Calculate the scores at each point
    for y in range(MapL):
        for x in range(MapL):
            if not board[y][x]:
                cd = abs(y-MapL/2+0.5) + abs(x-MapL/2+0.5)
                if (not step and cd>3) or (step and not np.any(board[max(y-2,0):min(y+3,MapL),max(x-2,0):min(x+3,MapL)])):
                    score = -np.inf
                    #print(" ",end='')
                else:
                    board[y][x] = player
                    scores = score_calc(coeffs[coeff],player)
                    score = scores[0] - cd + np.random.randint(-6,5)  # my score in this move
                    score_opp = scores[1]                             # the opponent's score in this move
                    board[y][x] = 3 - player
                    score2 = score_calc(coeffs[coeff],player)[0]      # my score if the opponent take this move
                    # Treatment of 33, 34 and 44
                    if coeffs[coeff][0][12]*3<score_opp<coeffs[coeff][0][6]*0.5+coeffs[coeff][0][12]:
                        score -= coeffs[coeff][0][6]
                    if 1.5<score2/coeffs[coeff][0][6]<2.5:
                        score -= coeffs[coeff][0][6]*0.25
                    elif 1.9<score2/coeffs[coeff][0][12]<2.1 or 0.5<(score2-coeffs[coeff][0][12])/coeffs[coeff][0][6]<1.5:
                        score -= coeffs[coeff][0][6]*0.5
                    elif 0.5<score2/coeffs[coeff][0][19]<3.5:
                        score -= coeffs[coeff][0][12]
                    #print('%5d' % score,end='')
                if max_score < score:
                    max_score = score; ymax = y+1; xmax = x+1
                board[y][x] = 0
            else:
                pass
                #print(' ['+'%s'%chr(21*board[y][x]+45)+']',end='')
        #print("")
    #print("B:",end='')
    #for j in range(len(numsall[0])): print('%2d'%int(numsall[0][j]),end=' ')
    #print("\nW:",end='')
    #for j in range(len(numsall[0])): print('%2d'%int(numsall[1][j]),end=' ')
    #print("")
    return ymax,xmax

def score_calc(coeff,player=2):
    '''calculate total score'''
    nums = np.zeros((2,len(coeffs[0][0])))
    def one_calc(a):
        '''calculate each list'''
        l = len(a)
        a = a.tolist()
        for i in range(l-2):
            if   a[i:i+3]==[0,1,0]: nums[0][0]+=1
            elif a[i:i+3]==[2,1,0] or a[i:i+3]==[0,1,2]: nums[0][1]+=1
            elif a[i:i+3]==[0,2,0]: nums[1][0]+=1
            elif a[i:i+3]==[1,2,0] or a[i:i+3]==[0,2,1]: nums[1][1]+=1
        for i in range(l-3):
            if   a[i:i+4]==[0,1,1,0]: nums[0][2]+=1
            elif a[i:i+4]==[2,1,1,0] or a[i:i+4]==[0,1,1,2]: nums[0][3]+=1
            elif a[i:i+4]==[0,2,2,0]: nums[1][2]+=1
            elif a[i:i+4]==[1,2,2,0] or a[i:i+4]==[0,2,2,1]: nums[1][3]+=1
        for i in range(l-4):
            if   a[i:i+5]==[0,1,0,1,0]: nums[0][4]+=1
            elif a[i:i+5]==[0,1,0,1,2] or a[i:i+5]==[2,1,0,1,0]: nums[0][5]+=1
            elif a[i:i+5]==[0,1,1,1,0]: nums[0][6]+=1
            elif a[i:i+5]==[0,1,1,1,2] or a[i:i+5]==[2,1,1,1,0]: nums[0][7]+=1
            elif a[i:i+5]==[1,1,1,1,1]: nums[0][-1]+=1
            elif a[i:i+5]==[0,2,0,2,0]: nums[1][4]+=1
            elif a[i:i+5]==[0,2,0,2,1] or a[i:i+5]==[1,2,0,2,0]: nums[1][5]+=1
            elif a[i:i+5]==[0,2,2,2,0]: nums[1][6]+=1
            elif a[i:i+5]==[0,2,2,2,1] or a[i:i+5]==[1,2,2,2,0]: nums[1][7]+=1
            elif a[i:i+5]==[2,2,2,2,2]: nums[1][-1]+=1
        if l>=6:
            for i in range(l-5):
                if   a[i:i+6]==[0,1,0,1,1,0] or a[i:i+6]==[0,1,1,0,1,0]: nums[0][8]+=1
                elif a[i:i+6]==[2,1,0,1,1,0] or a[i:i+6]==[0,1,1,0,1,2]: nums[0][9]+=1
                elif a[i:i+6]==[2,1,1,0,1,0] or a[i:i+6]==[0,1,0,1,1,2]: nums[0][10]+=1
                elif a[i:i+6]==[0,1,1,1,1,0]: nums[0][11]+=1
                elif a[i:i+6]==[2,1,1,1,1,0] or a[i:i+6]==[0,1,1,1,1,2]: nums[0][12]+=1
                elif a[i:i+6]==[1,1,1,0,1,1] or a[i:i+6]==[1,1,0,1,1,1]: nums[0][13]+=1
                elif a[i:i+6]==[0,2,0,2,2,0] or a[i:i+6]==[0,2,2,0,2,0]: nums[1][8]+=1
                elif a[i:i+6]==[1,2,0,2,2,0] or a[i:i+6]==[0,2,2,0,2,1]: nums[1][9]+=1
                # NOTE (editor): first pattern below fixed by mirroring the black
                # case; the source repeated the nums[1][9] pattern [0,2,2,0,2,1]
                # here, leaving this branch dead.
                elif a[i:i+6]==[1,2,2,0,2,0] or a[i:i+6]==[0,2,0,2,2,1]: nums[1][10]+=1
                elif a[i:i+6]==[0,2,2,2,2,0]: nums[1][11]+=1
                elif a[i:i+6]==[1,2,2,2,2,0] or a[i:i+6]==[0,2,2,2,2,1]: nums[1][12]+=1
                elif a[i:i+6]==[2,2,2,0,2,2] or a[i:i+6]==[2,2,0,2,2,2]: nums[1][13]+=1
        if l>=7:
            for i in range(l-6):
                if   a[i:i+7]==[0,1,1,1,0,1,0] or a[i:i+7]==[0,1,0,1,1,1,0]: nums[0][16]+=1
                elif a[i:i+7]==[2,1,1,0,1,1,2] or a[i:i+7]==[2,1,0,1,1,1,2] or a[i:i+7]==[2,1,1,1,0,1,2]: nums[0][13]+=1
                elif a[i:i+7]==[2,1,1,0,1,1,0] or a[i:i+7]==[0,1,1,0,1,1,2]: nums[0][14]+=1
                elif a[i:i+7]==[0,1,1,0,1,1,0] or a[i:i+7]==[0,1,1,1,0,1,2] or a[i:i+7]==[2,1,0,1,1,1,0]: nums[0][15]+=1
                elif a[i:i+7]==[0,1,0,1,1,1,2] or a[i:i+7]==[2,1,1,1,0,1,0]: nums[0][17]+=1
                elif a[i:i+7]==[0,2,2,2,0,2,0] or a[i:i+7]==[0,2,0,2,2,2,0]: nums[1][16]+=1
                elif a[i:i+7]==[1,2,2,0,2,2,1] or a[i:i+7]==[1,2,0,2,2,2,1] or a[i:i+7]==[1,2,2,2,0,2,1]: nums[1][13]+=1
                elif a[i:i+7]==[1,2,2,0,2,2,0] or a[i:i+7]==[0,2,2,0,2,2,1]: nums[1][14]+=1
                elif a[i:i+7]==[0,2,2,0,2,2,0] or a[i:i+7]==[0,2,2,2,0,2,1] or a[i:i+7]==[1,2,0,2,2,2,0]: nums[1][15]+=1
                elif a[i:i+7]==[0,2,0,2,2,2,1] or a[i:i+7]==[1,2,2,2,0,2,0]: nums[1][17]+=1
    for i in range(MapL):
        # Calculate row and column
        one_calc(board[i])
        one_calc(board[:,i])
    for i in range(-MapL+5,MapL-4):
        # Calculate the main and sub diagonals
        one_calc(np.diag(board,i))
        one_calc(np.diag(np.flip(board,axis=0),i))
    nums[:,0] -= nums[:,4]*2 + nums[:,8]+nums[:,10]+nums[:,16]+nums[:,17]
    nums[:,1] -= nums[:,5] + nums[:,9]
    nums[:,2] -= nums[:,8] + nums[:,9]+nums[:,14]+nums[:,15]
    nums[:,3] -= nums[:,10] + nums[:,14]
    nums[:,6] -= nums[:,15] + nums[:,16]
    nums[:,7] -= nums[:,17]
    #global numsall
    #numsall = nums
    if player==2:
        return np.sum(nums*coeff), np.sum(nums*np.flip(coeff,axis=0))
    else:
        return np.sum(nums*np.flip(coeff,axis=0)), np.sum(nums*coeff)

def button(event):
    '''event handler & modes'''
    if not end_flag:
        try:
            if mode == 0:
                move(round(event.ydata),round(event.xdata))
            elif mode == 1:
                if not step % 2:
                    y,x = auto(1,1); move(y,x)  # auto(1-B 2-W, 0-Old 1-New)
                else:
                    move(round(event.ydata),round(event.xdata))
            elif mode == 2:
                if not step % 2:
                    move(round(event.ydata),round(event.xdata))
                else:
                    y,x = auto(2); move(y,x)
            elif mode == 3:
                if not step % 2:
                    y,x = auto(1); move(y,x)
                else:
                    y,x = auto(2,1); move(y,x)
        except:
            pass

def move(i,j):
    '''take a move'''
    global step,board,end_flag
    if step == MapL**2:
        end_flag = 2
    try:
        if not board[i-1][j-1]:
            board[i-1][j-1] = step%2 + 1
            step += 1
            steps.append([i,j])
            if judge(i-1,j-1,WinN):
                end_flag = 1
            show()
    except:
        pass

def show():
    '''show the chessboard'''
    global step,board
    colors = ['w','k','w']
    names = ['player','PC']
    adsize = 0 if mode == 3 else step % 2
    plt.clf()
    fig = plt.figure(num=1)
    mngr = plt.get_current_fig_manager()
    mngr.window.setGeometry(0+adsize,30,701+adsize,701)  # position and size of the window
    fig.canvas.mpl_connect('button_press_event', button)
    plt.xlim(0.5,MapL+0.5); plt.ylim(0.5,MapL+0.5)
    for i in range(MapL):
        for j in range(MapL):
            if board[i][j]:
                plt.scatter(j+1,i+1, c=colors[board[i][j]],s=520*12/(MapL-1),
                            linewidths=1,edgecolors='k',zorder=128)
    if step:
        plt.scatter(steps[-1][1],steps[-1][0],s=100,c='r',lw=5,marker='+',zorder=256)
    if MapL==15:
        plt.scatter([4,4,8,12,12],[4,12,8,4,12], c='k',s=10,zorder=2)
    else:
        plt.scatter([4,4,MapL-3,MapL-3],[4,MapL-3,4,MapL-3], c='k',s=10,zorder=2)
    plt.plot([1,1,MapL,MapL,1],[1,MapL,MapL,1,1],c='k',lw=1)
    plt.fill([1,MapL,MapL,1],[1,1,MapL,MapL],c='tan',alpha=0.5,zorder=0)
    plt.fill([-MapL,2*MapL,2*MapL,-MapL],[-MapL,-MapL,2*MapL,2*MapL],c='tan',alpha=0.4,zorder=1)
    plt.grid(True,ls='--',c='k',zorder=1)
    plt.text(MapL/2,MapL+1.5,
             "Step:"+str(step)+" Black:"+names[mode & 1]+" "+str(result[0])+":"+str(result[1])+" White:"+names[(mode&2)//2],
             fontsize=15,ha="center")
    ax = plt.gca()
    ax.set_xticks(range(1,MapL+1))
    ax.set_yticks(range(1,MapL+1))
    for edge in ['left','right','top','bottom']:
        ax.spines[edge].set_visible(False)
    if end_flag:
        if end_flag == 2:
            string = "Draw!"
        else:
            string = "Black Wins" if step%2 else "White Wins"
        plt.text(MapL/2+0.5,MapL+0.5,string,fontsize=20,c='r',va="center",ha="center")
    if mode & (step % 2 + 1):
        # a PC is to move: draw, let the GUI breathe, then trigger the next move
        if not step:
            plt.pause(0.01)
        fig.canvas.draw_idle()
        fig.canvas.start_event_loop(0.1)
        if not end_flag:
            plt.clf()
            button(1)
    else:
        plt.show()

def init():
    '''Initialization interface'''
    def choice(event):
        global mode
        mode = 4 - round(event.ydata)
        if mode in [0,1,2,3] and 2.3 < event.xdata < 7.7:
            plt.close(0)
    fig = plt.figure(num=0)
    mngr = plt.get_current_fig_manager()
    mngr.window.setGeometry(100,100,600,600)
    fig.canvas.mpl_connect('button_press_event', choice)
    plt.xlim(0,10); plt.ylim(0,10)
    plt.xticks([]); plt.yticks([])
    plt.text(5,8," Gobang ",fontsize=25,color="w",bbox=(dict(fc="k",alpha=0.5)),
             va="center",ha="center")
    plt.text(5,5.7,"Click the chessboard to play.\n Close the chessboard to refresh\n or start a new game.",fontsize=13,va="center",ha="center")
    plt.text(5,4,'● player vs ○ player',fontsize=15,bbox=dict(fc=(1, 0.85, 0.7)),va="center",ha="center")
    plt.text(5,2,'● player vs ○ PC ', fontsize=15,bbox=dict(fc=(1, 0.85, 0.7)),va="center",ha="center")
    plt.text(5,3,'● PC vs ○ player', fontsize=15,bbox=dict(fc=(1, 0.85, 0.7)),va="center",ha="center")
    plt.text(5,1,'● PC vs ○ PC ', fontsize=15,bbox=dict(fc=(1, 0.85, 0.7)),va="center",ha="center")
    img = plt.imread("go.jpg")
    plt.imshow(img,extent=[0,10,5,10])
    plt.show()
    if mode == 4:
        exit()

if __name__ == "__main__":
    result = [0,0]
    init()
    while 1:
        show()
        if end_flag:
            if end_flag == 2:
                pass
            elif step % 2:
                result[0] += 1
            else:
                result[1] += 1
            end_flag = 0
            step = 0
            steps.clear()
            board[board != 0] = 0
            print("\n----- SCORE -----\nBlack",result[0],'-',result[1],"White\n"+"-"*17)
BetaGem/Games
gobang.py
gobang.py
py
13,951
python
en
code
2
github-code
6
16119409500
import customtkinter as tk

tk.set_appearance_mode("dark")

janela = tk.CTk()                     # main window ("janela" = window)
janela.title("Janela 1")
janela.geometry("400x350")
janela.configure(fg_color="grey31")
janela.resizable(width=False, height=False)

# give every row/column equal weight so the widgets stay centered
colunas = list(range(13))
linhas = list(range(13))
janela.grid_columnconfigure(colunas, weight=1)
janela.grid_rowconfigure(linhas, weight=1)

def verificar():
    """Average the two grades and show pass/fail ("verificar" = check)."""
    num1 = int(caixa1.get())
    num2 = int(caixa2.get())
    media = (num1 + num2) / 2
    if media >= 6:
        texto1.configure(text="Aprovado", text_color="green")    # "Passed"
    else:
        texto1.configure(text="Reprovado", text_color="red")     # "Failed"

texto = tk.CTkLabel(janela, text="Digite...")                    # "Type..."
texto.grid(row=6, column=6)

caixa1 = tk.CTkEntry(janela, placeholder_text="Digite a primeira nota",  # "Enter the first grade"
                     width=250, height=50)
caixa1.grid(row=7, column=6)

caixa2 = tk.CTkEntry(janela, placeholder_text="Digite a segunda nota",   # "Enter the second grade"
                     width=250, height=50)
caixa2.grid(row=8, column=6)

btn1 = tk.CTkButton(janela, text="Clique Aqui", command=verificar,       # "Click Here"
                    width=100, height=50, fg_color='DarkTurquoise')
btn1.grid(row=9, column=6)

texto1 = tk.CTkLabel(janela, text="")
texto1.grid(row=10, column=6)

janela.mainloop()
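# Editor's note (hedged): int(caixa1.get()) raises ValueError on empty or
# non-numeric input, which silently aborts the button callback. A guard like
# this sketch (the error message is an assumption) would keep the window
# responsive:
#
#     def verificar():
#         try:
#             num1 = int(caixa1.get())
#             num2 = int(caixa2.get())
#         except ValueError:
#             texto1.configure(text="Notas inválidas", text_color="orange")
#             return
#         media = (num1 + num2) / 2
#         ...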
dudasaanches/interface-grafica
1.py
1.py
py
1,201
python
pt
code
0
github-code
6