seq_id (string)
text (string)
repo_name (string)
sub_path (string)
file_name (string)
file_ext (string)
file_size_in_byte (int64)
program_lang (string)
lang (string)
doc_type (string)
stars (int64)
dataset (string)
pt (string)
api (list)
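Each record below pairs one source file (text) with its provenance metadata and an api list that annotates every imported-API usage with its fully qualified name, the source line number it occurs on, and a usage type (call, attribute, name, or argument). A minimal sketch of consuming one record, assuming the rows are serialized as JSON Lines; the path "records.jsonl" is hypothetical:

import json

with open("records.jsonl") as f:
    for line in f:
        record = json.loads(line)
        # Keep only Python files from reasonably popular repos.
        if record["program_lang"] == "python" and record["stars"] >= 50:
            print(record["file_name"], record["file_size_in_byte"], "bytes")
            for usage in record["api"]:
                # e.g. "os.path.join (call) at line 14"
                print("  {api_name} ({usage_type}) at line {line_number}".format(**usage))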
276138230
import asyncio
import sys
import socket
import os
import time

port = sys.argv[1]
dirs = '/Users/mario/IdeaProjects/pythonLearn/pytest/file/p1/server/'
if not os.path.exists(dirs):
    os.makedirs(dirs)


async def echo(r, w):
    msg = await r.read(1024)
    addr = w.get_extra_info('peername')
    print("Received %r from %r" % (msg.decode("UTF-8"), addr))

    if msg.decode("UTF-8") == "__EXIT__":
        print("client ask closing the connection")
        w.close()
        return

    if msg:
        count = len(os.listdir(dirs))
        newText = dirs + "echo_message_" + str(count + 1) + '.txt'
        content = str(time.time()) + "\n" + str(addr[0]) + ":" + str(addr[1]) + "\n" + str(msg.decode("UTF-8"))
        with open(newText, 'w') as f:
            f.write(content)

    w.write(msg)
    await w.drain()
    w.close()


print(socket.gethostname())

loop = asyncio.get_event_loop()
server_listen = asyncio.start_server(echo, socket.gethostname(), port, loop=loop)
server = loop.run_until_complete(server_listen)

try:
    loop.run_forever()
except KeyboardInterrupt:
    pass

# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
null
pytest/pytest_echoserver.py
pytest_echoserver.py
py
1187
python
en
code
null
code-starcoder2
83
[ { "api_name": "sys.argv", "line_number": 7, "usage_type": "attribute" }, { "api_name": "os.path.exists", "line_number": 9, "usage_type": "call" }, { "api_name": "os.path", "line_number": 9, "usage_type": "attribute" }, { "api_name": "os.makedirs", "line_number": 10, "usage_type": "call" }, { "api_name": "os.listdir", "line_number": 24, "usage_type": "call" }, { "api_name": "time.time", "line_number": 26, "usage_type": "call" }, { "api_name": "socket.gethostname", "line_number": 36, "usage_type": "call" }, { "api_name": "asyncio.get_event_loop", "line_number": 38, "usage_type": "call" }, { "api_name": "asyncio.start_server", "line_number": 39, "usage_type": "call" }, { "api_name": "socket.gethostname", "line_number": 39, "usage_type": "call" } ]
641150022
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
    path('<int:blog_id>', views.detail, name='detail'),
    path('edit/<int:blog_id>', views.edit, name="edit"),
    path('delete/<int:blog_id>', views.delete, name="delete"),
    path('newblog/', views.blogpost, name="newblog"),
    path('search', views.search, name="search"),
]
# name is set so each route can be referenced by name from the HTML templates.
null
blog/urls.py
urls.py
py
436
python
en
code
null
code-starcoder2
83
[ { "api_name": "django.urls.path", "line_number": 5, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 6, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 7, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 8, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 9, "usage_type": "call" } ]
522417123
#!/usr/bin/env python3

from pydarknet import Detector, Image
import argparse
import cv2
import os

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Process an image.')
    parser.add_argument('path', metavar='image_path', type=str, help='Path to source image')
    parser.add_argument('output', metavar='output_path', type=str, help='Data file for output')
    args = parser.parse_args()

    darknet_path = os.environ['DARKNET_HOME']
    config = os.path.join(darknet_path, 'cfg/yolov3.cfg')
    weights = os.path.join(darknet_path, 'yolov3.weights')
    coco = os.path.join(darknet_path, 'cfg/coco.data')

    net = Detector(bytes(config, encoding="utf-8"), bytes(weights, encoding="utf-8"), 0, bytes(coco, encoding="utf-8"))

    img = cv2.imread(args.path)

    img2 = Image(img)

    # r = net.classify(img2)
    results = net.detect(img2)

    output = open(args.output, 'w')

    for cat, score, bounds in results:
        x, y, w, h = bounds
        output.write("%s at X: %d\tY: %d\n" % (str(cat), x, y))
        cv2.rectangle(img, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)), (255, 0, 0), thickness=2)
        cv2.putText(img, str(cat.decode("utf-8")), (int(x), int(y)), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 0))

    print('%d Detections logged in %s' % (len(results), args.output))
    cv2.imshow("output", img)
    cv2.imwrite("output.jpg", img)
    cv2.waitKey(0)
null
yolo_image.py
yolo_image.py
py
1423
python
en
code
null
code-starcoder2
83
[ { "api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call" }, { "api_name": "os.environ", "line_number": 13, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 14, "usage_type": "call" }, { "api_name": "os.path", "line_number": 14, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 15, "usage_type": "call" }, { "api_name": "os.path", "line_number": 15, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 16, "usage_type": "call" }, { "api_name": "os.path", "line_number": 16, "usage_type": "attribute" }, { "api_name": "pydarknet.Detector", "line_number": 18, "usage_type": "call" }, { "api_name": "cv2.imread", "line_number": 20, "usage_type": "call" }, { "api_name": "pydarknet.Image", "line_number": 22, "usage_type": "call" }, { "api_name": "cv2.rectangle", "line_number": 32, "usage_type": "call" }, { "api_name": "cv2.putText", "line_number": 33, "usage_type": "call" }, { "api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 33, "usage_type": "attribute" }, { "api_name": "cv2.imshow", "line_number": 36, "usage_type": "call" }, { "api_name": "cv2.imwrite", "line_number": 37, "usage_type": "call" }, { "api_name": "cv2.waitKey", "line_number": 38, "usage_type": "call" } ]
601122953
import logging

from backbone_server.errors.missing_key_exception import MissingKeyException
from backbone_server.errors.integrity_exception import IntegrityException

from backbone_server.study.gets import StudiesGet
from backbone_server.study.get import StudyGet
from backbone_server.study.put import StudyPut

from backbone_server.controllers.base_controller import BaseController

from backbone_server.controllers.decorators import apply_decorators


@apply_decorators
class StudyController(BaseController):

    def download_studies(self, start=None, count=None, user=None, auths=None):
        """
        fetches studies

        :param start: for pagination start the result set at a record x
        :type start: int
        :param count: for pagination the number of entries to return
        :type count: int

        :rtype: Studies
        """
        get = StudiesGet(self.get_connection())
        studies = get.get()
        return studies, 200

    def download_study(self, study_name, user=None, auths=None):
        """
        fetches a study

        :param study_name: ID of study to fetch
        :type study_name: str

        :rtype: Study
        """
        get = StudyGet(self.get_connection())
        study = None
        retcode = 200
        try:
            study = get.get(study_name)
        except MissingKeyException as dme:
            logging.getLogger(__name__).debug(
                "update_study: {}".format(repr(dme)))
            retcode = 404
            study = str(dme)
        return study, retcode

    def update_study(self, study_name, study, user=None, auths=None):
        """
        updates a study

        :param study_name: ID of study to update
        :type study_name: str
        :param study:
        :type study: dict | bytes

        :rtype: Study
        """
        retcode = 200
        updated_study = None
        try:
            put = StudyPut(self.get_connection())
            updated_study = put.put(study_name, study)
        except IntegrityException as dme:
            logging.getLogger(__name__).debug(
                "update_study: {}".format(repr(dme)))
            retcode = 422
            updated_study = str(dme)
        except MissingKeyException as dme:
            logging.getLogger(__name__).debug(
                "update_study: {}".format(repr(dme)))
            retcode = 404
            updated_study = str(dme)
        return updated_study, retcode
null
server/backbone_server/controllers/study_controller.py
study_controller.py
py
2455
python
en
code
null
code-starcoder2
83
[ { "api_name": "backbone_server.controllers.base_controller.BaseController", "line_number": 15, "usage_type": "name" }, { "api_name": "backbone_server.study.gets.StudiesGet", "line_number": 29, "usage_type": "call" }, { "api_name": "backbone_server.study.get.StudyGet", "line_number": 45, "usage_type": "call" }, { "api_name": "backbone_server.errors.missing_key_exception.MissingKeyException", "line_number": 51, "usage_type": "name" }, { "api_name": "logging.getLogger", "line_number": 52, "usage_type": "call" }, { "api_name": "backbone_server.study.put.StudyPut", "line_number": 75, "usage_type": "call" }, { "api_name": "backbone_server.errors.integrity_exception.IntegrityException", "line_number": 78, "usage_type": "name" }, { "api_name": "logging.getLogger", "line_number": 79, "usage_type": "call" }, { "api_name": "backbone_server.errors.missing_key_exception.MissingKeyException", "line_number": 83, "usage_type": "name" }, { "api_name": "logging.getLogger", "line_number": 84, "usage_type": "call" }, { "api_name": "backbone_server.controllers.decorators.apply_decorators", "line_number": 14, "usage_type": "name" } ]
386613464
import django_tables2 as tables
from django.utils.safestring import mark_safe
from website.models import Instancia, Estudiante


class AsignaturasTable(tables.Table):
    nombre = tables.Column(empty_values=())
    carrera = tables.Column(accessor='asignatura.malla.carrera.nombre',
                            verbose_name='Carrera')

    def render_nombre(self, record):
        return mark_safe('<a href="/curso/{0}">{1}</a>'
                         .format(record.pk, record.asignatura.nombre))

    class Meta:
        model = Instancia
        row_attrs = {
            'class': "teal-text",
            'href': lambda record: '/curso/' + str(record.pk)
        }
        attrs = {'class': 'stripped'}
        fields = ('nombre', 'carrera', 'ano', 'semestre', 'estado')


class EstudiantesTable(tables.Table):
    nombre = tables.Column(empty_values=())
    estado = tables.Column(empty_values=())

    def render_nombre(self, record):
        return mark_safe('<a href="/ficha/{0}">{1}</a>'
                         .format(record.pk, record.nombre))

    def render_estado(self, record):
        pk = self.context['request'].path.rsplit('/')[2]
        print(pk)
        return record.inscripciones.get(instancia__pk=pk).estado.nombre

    class Meta:
        model = Estudiante
        row_attrs = {
            'class': "teal-text"
        }
        attrs = {'class': 'stripped'}
        fields = ('nombre', 'paterno', 'materno', 'estado')


class EstudianteCursos(tables.Table):
    nombre = tables.Column(empty_values=())
    estado = tables.Column(empty_values=())
    carrera = tables.Column(accessor='asignatura.malla.carrera.nombre',
                            verbose_name='Carrera')

    def render_nombre(self, record):
        return record.asignatura.nombre

    def render_estado(self, record):
        user = self.context['request'].user.estudiante
        return record.inscripciones.get(estudiante=user).estado.nombre

    class Meta:
        model = Instancia
        row_attrs = {
            'class': "teal-text",
            'href': lambda record: '/curso/' + str(record.pk)
        }
        attrs = {'class': 'stripped'}
        fields = ('nombre', 'carrera', 'ano', 'semestre', 'estado')
null
website/tables.py
tables.py
py
2420
python
en
code
null
code-starcoder2
83
[ { "api_name": "django_tables2.Table", "line_number": 6, "usage_type": "attribute" }, { "api_name": "django_tables2.Column", "line_number": 7, "usage_type": "call" }, { "api_name": "django_tables2.Column", "line_number": 8, "usage_type": "call" }, { "api_name": "django.utils.safestring.mark_safe", "line_number": 12, "usage_type": "call" }, { "api_name": "website.models.Instancia", "line_number": 16, "usage_type": "name" }, { "api_name": "django_tables2.Table", "line_number": 29, "usage_type": "attribute" }, { "api_name": "django_tables2.Column", "line_number": 30, "usage_type": "call" }, { "api_name": "django_tables2.Column", "line_number": 31, "usage_type": "call" }, { "api_name": "django.utils.safestring.mark_safe", "line_number": 34, "usage_type": "call" }, { "api_name": "website.models.Estudiante", "line_number": 43, "usage_type": "name" }, { "api_name": "django_tables2.Table", "line_number": 54, "usage_type": "attribute" }, { "api_name": "django_tables2.Column", "line_number": 55, "usage_type": "call" }, { "api_name": "django_tables2.Column", "line_number": 56, "usage_type": "call" }, { "api_name": "django_tables2.Column", "line_number": 57, "usage_type": "call" }, { "api_name": "website.models.Instancia", "line_number": 68, "usage_type": "name" } ]
3053519
import numpy as np
from sklearn.datasets import load_boston

# 1. Data
dataset = load_boston()
x = dataset.data
y = dataset.target

# print(x.shape)  # (506, 13)
# print(y.shape)  # (506,)
# print('===================')
# print(x[:5])
# print(y[:10])
# print(np.max(x), np.min(x))
# print(dataset.feature_names)
# print(dataset.DESCR)

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8,
                                                    random_state=104, shuffle=True)

# 2. Model construction
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense

input1 = Input(shape=(13,))
dense1 = Dense(120, activation='relu')(input1)
dense1 = Dense(80)(dense1)
dense1 = Dense(60)(dense1)
dense1 = Dense(30)(dense1)
dense1 = Dense(7)(dense1)
dense1 = Dense(7)(dense1)
dense1 = Dense(5)(dense1)
dense1 = Dense(4)(dense1)
output1 = Dense(1)(dense1)
model = Model(inputs=input1, outputs=output1)

# 3. Compile and train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x_train, y_train, epochs=1000, batch_size=4,
          validation_split=0.2, verbose=1)

# 4. Evaluate and predict
loss, mae = model.evaluate(x_test, y_test, batch_size=4)
print('loss,mae : ', loss, mae)

y_predict = model.predict(x_test)

from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
    return np.sqrt(mean_squared_error(y_test, y_predict))
print('RMSE : ', RMSE(y_test, y_predict))

from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_predict)
print('R2: ', r2)

# loss,mae :  10.771950721740723 2.4755585193634033
# RMSE :  3.2820651926340183
# R2:  0.8723619073719358
null
keras/keras18_boston1.py
keras18_boston1.py
py
1640
python
en
code
null
code-starcoder2
83
[ { "api_name": "sklearn.datasets.load_boston", "line_number": 6, "usage_type": "call" }, { "api_name": "sklearn.model_selection.train_test_split", "line_number": 19, "usage_type": "call" }, { "api_name": "tensorflow.keras.layers.Input", "line_number": 25, "usage_type": "call" }, { "api_name": "tensorflow.keras.layers.Dense", "line_number": 26, "usage_type": "call" }, { "api_name": "tensorflow.keras.layers.Dense", "line_number": 27, "usage_type": "call" }, { "api_name": "tensorflow.keras.layers.Dense", "line_number": 28, "usage_type": "call" }, { "api_name": "tensorflow.keras.layers.Dense", "line_number": 29, "usage_type": "call" }, { "api_name": "tensorflow.keras.layers.Dense", "line_number": 30, "usage_type": "call" }, { "api_name": "tensorflow.keras.layers.Dense", "line_number": 31, "usage_type": "call" }, { "api_name": "tensorflow.keras.layers.Dense", "line_number": 32, "usage_type": "call" }, { "api_name": "tensorflow.keras.layers.Dense", "line_number": 33, "usage_type": "call" }, { "api_name": "tensorflow.keras.layers.Dense", "line_number": 34, "usage_type": "call" }, { "api_name": "tensorflow.keras.models.Model", "line_number": 35, "usage_type": "call" }, { "api_name": "numpy.sqrt", "line_number": 49, "usage_type": "call" }, { "api_name": "sklearn.metrics.mean_squared_error", "line_number": 49, "usage_type": "call" }, { "api_name": "sklearn.metrics.r2_score", "line_number": 53, "usage_type": "call" } ]
465589001
import json
import sys, urllib.request as url
from html.parser import HTMLParser

# https://www.youtube.com/watch?v=ulzlXMZa8ak
# http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python
# answer submitted by stackoverflow user eloff
class MLStripper(HTMLParser):
    def __init__(self):
        self.reset()
        self.strict = False
        self.convert_charrefs = True
        self.fed = []

    def handle_data(self, d):
        self.fed.append(d)

    def get_data(self):
        return ''.join(self.fed)


def strip_tags(html):
    s = MLStripper()
    s.feed(html)
    return s.get_data()


try:
    # Addresslist=[]
    # Source=sys.argv[1]
    Source = "ForestRd"
    destination = "UniversityofNewHaven"
    # destination=sys.argv[1]
except ValueError:
    print("Error in command line arguments")

apicode = "AIzaSyD5Fqole2qOY6R9j5vJ-shLZY2ZYAlvAEE"
# urlquery="http://maps.googleapis.com/maps/api/directions/json?origin=Jackson+Av&destination=Prospect+Av"+"&APPID="+apicode
urlquery = "http://maps.googleapis.com/maps/api/directions/json?origin=" + Source + "&destination=" + destination + "&APPID=" + apicode

page = url.urlopen(urlquery)
if page.getcode() == 200:
    DirectionString = page.read()
    DirectionString = DirectionString.decode("utf-8")
    Directiondata = json.loads(DirectionString)
    for i in range(0, len(Directiondata['routes'][0]['legs'][0]['steps'])):
        j = Directiondata['routes'][0]['legs'][0]['steps'][i]['html_instructions']
        # strip_tags[j]
        print(strip_tags(j))
null
Python/GetMapDirection.py
GetMapDirection.py
py
1522
python
en
code
null
code-starcoder2
83
[ { "api_name": "html.parser.HTMLParser", "line_number": 8, "usage_type": "name" }, { "api_name": "html.parser", "line_number": 21, "usage_type": "argument" }, { "api_name": "urllib.request.urlopen", "line_number": 34, "usage_type": "call" }, { "api_name": "urllib.request", "line_number": 34, "usage_type": "name" }, { "api_name": "json.loads", "line_number": 41, "usage_type": "call" } ]
111141535
import operator

from flask import abort, jsonify
from sqlalchemy.sql import func, case

from . import bp
from app import db
from app.models import (
    Acceptance,
    Author,
    Barrel,
    Batch,
    Boil,
    Btproduct,
    Container,
    Doctype,
    Document,
    Load,
    Lot,
    Manufacturer,
    Manufacturerlot,
    Product,
    Seller,
    Trademark,
    Weighting,
)


@bp.route("/api/v1/boils/summary/<int:batchid>", methods=['GET'])
def boil_summary_data(batchid):
    batch_data = Batch.get_name_date_plant_by_id(batchid)

    boil_subqry = db.session.query(
        Boil.BatchPK.label('batch_id'),
        Boil.ProductId.label('product_id'),
        Boil.Quantity.label('plan'),
        Product.ProductName.label('product_name')
    ).join(
        Product, Boil.ProductId == Product.ProductId
    ).filter(
        Boil.BatchPK == batchid
    ).subquery()

    wght_subqry = db.session.query(
        Weighting.BatchPK,
        Weighting.ProductId.label('product_id'),
        Product.ProductName.label('product_name'),
        func.sum(Weighting.Quantity).label('fact')
    ).join(
        Product, Weighting.ProductId == Product.ProductId
    ).filter(
        Weighting.BatchPK == batchid
    ).group_by(
        Weighting.BatchPK,
        Weighting.ProductId,
        Product.ProductName
    ).subquery()

    boil_qry = db.session.query(
        boil_subqry.c.product_id.label('b_product_id'),
        boil_subqry.c.product_name.label('b_product_name'),
        boil_subqry.c.plan.label('plan'),
        wght_subqry.c.product_id.label('w_product_id'),
        wght_subqry.c.product_name.label('w_product_name'),
        wght_subqry.c.fact.label('fact')
    ).join(
        wght_subqry,
        boil_subqry.c.product_id == wght_subqry.c.product_id,
        full=True
    ).order_by(case(
        [(boil_subqry.c.product_name != '', boil_subqry.c.product_name), ],
        else_=wght_subqry.c.product_name
    ).asc())

    boil_rows = boil_qry.all()
    if boil_rows is None:
        abort(404)

    data = []
    for row in boil_rows:
        product_id = row.b_product_id if row.b_product_id\
            else row.w_product_id
        product_name = row.b_product_name if row.b_product_name\
            else row.w_product_name
        state = (row.plan is not None) and (row.fact is not None)
        data.append({
            'product_id': product_id,
            'product_name': product_name,
            'state': state,
            'plan': str(row.plan),
            'fact': str(row.fact)
        })

    response = {'boil': {'name': batch_data['name'],
                         'date': batch_data['date'],
                         'plant': batch_data['plant'],
                         'data': data}
                }
    return jsonify(response)


@bp.route("/api/v1/boils/weighting/<int:batchid>", methods=['GET'])
def boil_weighting_data(batchid):
    batch_data = Batch.get_name_date_plant_by_id(batchid)

    wght_qry = db.session.query(
        Weighting.ProductId.label('product_id'),
        Product.ProductName.label('product_name'),
        Weighting.Quantity.label('quantity'),
        Lot.LotPK.label('lot_id'),
        Lot.LotName.label('lot'),
        Author.AuthorName.label('user'),
        Document.CreateDate.label('date')
    ).join(
        Product, Weighting.ProductId == Product.ProductId
    ).join(
        Lot, Weighting.LotPK == Lot.LotPK
    ).join(
        Document, Weighting.DocumentPK == Document.DocumentPK
    ).join(
        Author, Document.AuthorPK == Author.AuthorPK
    ).filter(
        Weighting.BatchPK == batchid
    ).order_by((Product.ProductName).asc())

    weighting_rows = wght_qry.all()
    if weighting_rows is None:
        abort(404)

    data = []
    for row in weighting_rows:
        data.append({
            'product_id': row.product_id,
            'product_name': row.product_name,
            'quantity': str(row.quantity),
            'lot_id': row.lot_id,
            'lot': row.lot,
            'user': row.user,
            'date': row.date.strftime("%d-%m-%Y %H:%M:%S") if row.date else None,
            # 'time': r.date.strftime("%H:%M:%S") if r.date else None,
        })

    response = {'boil': {'name': batch_data['name'],
                         'date': batch_data['date'],
                         'plant': batch_data['plant'],
                         'data': data}
                }
    return jsonify(response)


@bp.route("/api/v1/boils/load/<int:batchid>", methods=['GET'])
def boil_load_data(batchid):
    batch_data = Batch.get_name_date_plant_by_id(batchid)

    dist_cont_subqry = db.session.query(
        Load.ContainerPK
    ).filter(
        Load.BatchPK == batchid
    ).distinct().subquery()

    cont_subqry = db.session.query(
        Load.ContainerPK.label('container_id'),
        Author.AuthorPK.label('user'),
        Document.CreateDate.label('date')
    ).distinct(
        Load.ContainerPK
    ).join(
        Document, Load.DocumentPK == Document.DocumentPK
    ).join(
        Author, Document.AuthorPK == Author.AuthorPK
    ).filter(
        Load.BatchPK == batchid
    ).subquery()

    wght_subqry = db.session.query(
        Weighting.ContainerPK.label('container_id'),
        Weighting.ProductId.label('product_id'),
        Product.ProductName.label('product_name')
    ).join(
        Product, Weighting.ProductId == Product.ProductId
    ).filter(
        Weighting.ContainerPK.in_(dist_cont_subqry)
    ).subquery()

    load_qry = db.session.query(
        cont_subqry.c.user.label('user'),
        cont_subqry.c.date.label('date'),
        wght_subqry.c.product_id.label('product_id'),
        wght_subqry.c.product_name.label('product_name')
    ).join(
        wght_subqry,
        cont_subqry.c.container_id == wght_subqry.c.container_id
    ).order_by(
        wght_subqry.c.product_name
    )

    load_data = load_qry.all()

    data = []
    for row in load_data:
        data.append({
            'product_id': row.product_id,
            'product_name': row.product_name,
            'user': row.user,
            'date': row.date.strftime("%d-%m-%Y %H:%M:%S") if row.date else None,
        })

    response = {'boil': {'name': batch_data['name'],
                         'date': batch_data['date'],
                         'plant': batch_data['plant'],
                         'data': data}
                }
    return jsonify(response)
null
web/app/api/routes.py
routes.py
py
6422
python
en
code
null
code-starcoder2
83
[ { "api_name": "app.models.Batch.get_name_date_plant_by_id", "line_number": 31, "usage_type": "call" }, { "api_name": "app.models.Batch", "line_number": 31, "usage_type": "name" }, { "api_name": "app.models.Product", "line_number": 39, "usage_type": "argument" }, { "api_name": "app.db.session.query", "line_number": 33, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 33, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 33, "usage_type": "name" }, { "api_name": "app.models.Boil.BatchPK.label", "line_number": 34, "usage_type": "call" }, { "api_name": "app.models.Boil.BatchPK", "line_number": 34, "usage_type": "attribute" }, { "api_name": "app.models.Boil", "line_number": 34, "usage_type": "name" }, { "api_name": "app.models.Boil.ProductId.label", "line_number": 35, "usage_type": "call" }, { "api_name": "app.models.Boil.ProductId", "line_number": 35, "usage_type": "attribute" }, { "api_name": "app.models.Boil", "line_number": 35, "usage_type": "name" }, { "api_name": "app.models.Boil.Quantity.label", "line_number": 36, "usage_type": "call" }, { "api_name": "app.models.Boil.Quantity", "line_number": 36, "usage_type": "attribute" }, { "api_name": "app.models.Boil", "line_number": 36, "usage_type": "name" }, { "api_name": "app.models.Product.ProductName.label", "line_number": 37, "usage_type": "call" }, { "api_name": "app.models.Product.ProductName", "line_number": 37, "usage_type": "attribute" }, { "api_name": "app.models.Product", "line_number": 37, "usage_type": "name" }, { "api_name": "app.models.Boil.ProductId", "line_number": 39, "usage_type": "attribute" }, { "api_name": "app.models.Boil", "line_number": 39, "usage_type": "name" }, { "api_name": "app.models.Product.ProductId", "line_number": 39, "usage_type": "attribute" }, { "api_name": "app.models.Boil.BatchPK", "line_number": 41, "usage_type": "attribute" }, { "api_name": "app.models.Boil", "line_number": 41, "usage_type": "name" }, { "api_name": "app.models.Product", "line_number": 50, "usage_type": "argument" }, { "api_name": "app.db.session.query", "line_number": 44, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 44, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 44, "usage_type": "name" }, { "api_name": "app.models.Weighting.BatchPK", "line_number": 45, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 45, "usage_type": "name" }, { "api_name": "app.models.Weighting.ProductId.label", "line_number": 46, "usage_type": "call" }, { "api_name": "app.models.Weighting.ProductId", "line_number": 46, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 46, "usage_type": "name" }, { "api_name": "app.models.Product.ProductName.label", "line_number": 47, "usage_type": "call" }, { "api_name": "app.models.Product.ProductName", "line_number": 47, "usage_type": "attribute" }, { "api_name": "app.models.Product", "line_number": 47, "usage_type": "name" }, { "api_name": "sqlalchemy.sql.func.sum", "line_number": 48, "usage_type": "call" }, { "api_name": "sqlalchemy.sql.func", "line_number": 48, "usage_type": "name" }, { "api_name": "app.models.Weighting.Quantity", "line_number": 48, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 48, "usage_type": "name" }, { "api_name": "app.models.Weighting.ProductId", "line_number": 50, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 50, "usage_type": "name" }, { "api_name": 
"app.models.Product.ProductId", "line_number": 50, "usage_type": "attribute" }, { "api_name": "app.models.Weighting.BatchPK", "line_number": 52, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 52, "usage_type": "name" }, { "api_name": "app.models.Weighting.BatchPK", "line_number": 54, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 54, "usage_type": "name" }, { "api_name": "app.models.Weighting.ProductId", "line_number": 55, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 55, "usage_type": "name" }, { "api_name": "app.models.Product.ProductName", "line_number": 56, "usage_type": "attribute" }, { "api_name": "app.models.Product", "line_number": 56, "usage_type": "name" }, { "api_name": "app.db.session.query", "line_number": 59, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 59, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 59, "usage_type": "name" }, { "api_name": "sqlalchemy.sql.case", "line_number": 70, "usage_type": "call" }, { "api_name": "flask.abort", "line_number": 78, "usage_type": "call" }, { "api_name": "flask.jsonify", "line_number": 102, "usage_type": "call" }, { "api_name": "app.models.Batch.get_name_date_plant_by_id", "line_number": 108, "usage_type": "call" }, { "api_name": "app.models.Batch", "line_number": 108, "usage_type": "name" }, { "api_name": "app.models.Author", "line_number": 125, "usage_type": "argument" }, { "api_name": "app.models.Document", "line_number": 123, "usage_type": "argument" }, { "api_name": "app.models.Lot", "line_number": 121, "usage_type": "argument" }, { "api_name": "app.models.Product", "line_number": 119, "usage_type": "argument" }, { "api_name": "app.db.session.query", "line_number": 110, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 110, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 110, "usage_type": "name" }, { "api_name": "app.models.Weighting.ProductId.label", "line_number": 111, "usage_type": "call" }, { "api_name": "app.models.Weighting.ProductId", "line_number": 111, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 111, "usage_type": "name" }, { "api_name": "app.models.Product.ProductName.label", "line_number": 112, "usage_type": "call" }, { "api_name": "app.models.Product.ProductName", "line_number": 112, "usage_type": "attribute" }, { "api_name": "app.models.Product", "line_number": 112, "usage_type": "name" }, { "api_name": "app.models.Weighting.Quantity.label", "line_number": 113, "usage_type": "call" }, { "api_name": "app.models.Weighting.Quantity", "line_number": 113, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 113, "usage_type": "name" }, { "api_name": "app.models.Lot.LotPK.label", "line_number": 114, "usage_type": "call" }, { "api_name": "app.models.Lot.LotPK", "line_number": 114, "usage_type": "attribute" }, { "api_name": "app.models.Lot", "line_number": 114, "usage_type": "name" }, { "api_name": "app.models.Lot.LotName.label", "line_number": 115, "usage_type": "call" }, { "api_name": "app.models.Lot.LotName", "line_number": 115, "usage_type": "attribute" }, { "api_name": "app.models.Lot", "line_number": 115, "usage_type": "name" }, { "api_name": "app.models.Author.AuthorName.label", "line_number": 116, "usage_type": "call" }, { "api_name": "app.models.Author.AuthorName", "line_number": 116, "usage_type": "attribute" }, { "api_name": "app.models.Author", 
"line_number": 116, "usage_type": "name" }, { "api_name": "app.models.Document.CreateDate.label", "line_number": 117, "usage_type": "call" }, { "api_name": "app.models.Document.CreateDate", "line_number": 117, "usage_type": "attribute" }, { "api_name": "app.models.Document", "line_number": 117, "usage_type": "name" }, { "api_name": "app.models.Weighting.ProductId", "line_number": 119, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 119, "usage_type": "name" }, { "api_name": "app.models.Product.ProductId", "line_number": 119, "usage_type": "attribute" }, { "api_name": "app.models.Weighting.LotPK", "line_number": 121, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 121, "usage_type": "name" }, { "api_name": "app.models.Lot.LotPK", "line_number": 121, "usage_type": "attribute" }, { "api_name": "app.models.Weighting.DocumentPK", "line_number": 123, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 123, "usage_type": "name" }, { "api_name": "app.models.Document.DocumentPK", "line_number": 123, "usage_type": "attribute" }, { "api_name": "app.models.Document.AuthorPK", "line_number": 125, "usage_type": "attribute" }, { "api_name": "app.models.Document", "line_number": 125, "usage_type": "name" }, { "api_name": "app.models.Author.AuthorPK", "line_number": 125, "usage_type": "attribute" }, { "api_name": "app.models.Weighting.BatchPK", "line_number": 127, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 127, "usage_type": "name" }, { "api_name": "app.models.Product.ProductName.asc", "line_number": 128, "usage_type": "call" }, { "api_name": "app.models.Product.ProductName", "line_number": 128, "usage_type": "attribute" }, { "api_name": "app.models.Product", "line_number": 128, "usage_type": "name" }, { "api_name": "flask.abort", "line_number": 133, "usage_type": "call" }, { "api_name": "flask.jsonify", "line_number": 156, "usage_type": "call" }, { "api_name": "app.models.Batch.get_name_date_plant_by_id", "line_number": 162, "usage_type": "call" }, { "api_name": "app.models.Batch", "line_number": 162, "usage_type": "name" }, { "api_name": "app.db.session.query", "line_number": 164, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 164, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 164, "usage_type": "name" }, { "api_name": "app.models.Load.ContainerPK", "line_number": 165, "usage_type": "attribute" }, { "api_name": "app.models.Load", "line_number": 165, "usage_type": "name" }, { "api_name": "app.models.Load.BatchPK", "line_number": 167, "usage_type": "attribute" }, { "api_name": "app.models.Load", "line_number": 167, "usage_type": "name" }, { "api_name": "app.models.Author", "line_number": 179, "usage_type": "argument" }, { "api_name": "app.models.Document", "line_number": 177, "usage_type": "argument" }, { "api_name": "app.db.session.query", "line_number": 170, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 170, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 170, "usage_type": "name" }, { "api_name": "app.models.Load.ContainerPK.label", "line_number": 171, "usage_type": "call" }, { "api_name": "app.models.Load.ContainerPK", "line_number": 171, "usage_type": "attribute" }, { "api_name": "app.models.Load", "line_number": 171, "usage_type": "name" }, { "api_name": "app.models.Author.AuthorPK.label", "line_number": 172, "usage_type": "call" }, { "api_name": "app.models.Author.AuthorPK", 
"line_number": 172, "usage_type": "attribute" }, { "api_name": "app.models.Author", "line_number": 172, "usage_type": "name" }, { "api_name": "app.models.Document.CreateDate.label", "line_number": 173, "usage_type": "call" }, { "api_name": "app.models.Document.CreateDate", "line_number": 173, "usage_type": "attribute" }, { "api_name": "app.models.Document", "line_number": 173, "usage_type": "name" }, { "api_name": "app.models.Load.ContainerPK", "line_number": 175, "usage_type": "attribute" }, { "api_name": "app.models.Load", "line_number": 175, "usage_type": "name" }, { "api_name": "app.models.Load.DocumentPK", "line_number": 177, "usage_type": "attribute" }, { "api_name": "app.models.Load", "line_number": 177, "usage_type": "name" }, { "api_name": "app.models.Document.DocumentPK", "line_number": 177, "usage_type": "attribute" }, { "api_name": "app.models.Document.AuthorPK", "line_number": 179, "usage_type": "attribute" }, { "api_name": "app.models.Document", "line_number": 179, "usage_type": "name" }, { "api_name": "app.models.Author.AuthorPK", "line_number": 179, "usage_type": "attribute" }, { "api_name": "app.models.Load.BatchPK", "line_number": 181, "usage_type": "attribute" }, { "api_name": "app.models.Load", "line_number": 181, "usage_type": "name" }, { "api_name": "app.models.Product", "line_number": 189, "usage_type": "argument" }, { "api_name": "app.db.session.query", "line_number": 184, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 184, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 184, "usage_type": "name" }, { "api_name": "app.models.Weighting.ContainerPK.label", "line_number": 185, "usage_type": "call" }, { "api_name": "app.models.Weighting.ContainerPK", "line_number": 185, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 185, "usage_type": "name" }, { "api_name": "app.models.Weighting.ProductId.label", "line_number": 186, "usage_type": "call" }, { "api_name": "app.models.Weighting.ProductId", "line_number": 186, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 186, "usage_type": "name" }, { "api_name": "app.models.Product.ProductName.label", "line_number": 187, "usage_type": "call" }, { "api_name": "app.models.Product.ProductName", "line_number": 187, "usage_type": "attribute" }, { "api_name": "app.models.Product", "line_number": 187, "usage_type": "name" }, { "api_name": "app.models.Weighting.ProductId", "line_number": 189, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 189, "usage_type": "name" }, { "api_name": "app.models.Product.ProductId", "line_number": 189, "usage_type": "attribute" }, { "api_name": "app.models.Weighting.ContainerPK.in_", "line_number": 191, "usage_type": "call" }, { "api_name": "app.models.Weighting.ContainerPK", "line_number": 191, "usage_type": "attribute" }, { "api_name": "app.models.Weighting", "line_number": 191, "usage_type": "name" }, { "api_name": "app.db.session.query", "line_number": 194, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 194, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 194, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 224, "usage_type": "call" } ]
150331567
""" ???+ note "Child classes which are `functionality`-by-`feature` products." This could resemble template specialization in C++. """ from .functionality import ( BokehDataAnnotator, ) from .feature import BokehForText class BokehTextAnnotator(BokehDataAnnotator, BokehForText): """ ???+ note "The text flavor of `BokehDataAnnotator`."" """ TOOLTIP_KWARGS = BokehForText.TOOLTIP_KWARGS MANDATORY_COLUMNS = BokehForText.MANDATORY_COLUMNS SUBSET_GLYPH_KWARGS = BokehDataAnnotator.SUBSET_GLYPH_KWARGS def _layout_widgets(self): """Define the layout of widgets.""" from bokeh.layouts import column, row layout_rows = ( row(self.search_pos, self.search_neg), # row(self.data_key_button_group), row(self.annotator_input, self.annotator_apply, self.annotator_export), ) return column(*layout_rows)
null
hover/core/explorer/specialization.py
specialization.py
py
910
python
en
code
null
code-starcoder2
83
[ { "api_name": "functionality.BokehDataAnnotator", "line_number": 11, "usage_type": "name" }, { "api_name": "feature.BokehForText", "line_number": 11, "usage_type": "name" }, { "api_name": "feature.BokehForText.TOOLTIP_KWARGS", "line_number": 16, "usage_type": "attribute" }, { "api_name": "feature.BokehForText", "line_number": 16, "usage_type": "name" }, { "api_name": "feature.BokehForText.MANDATORY_COLUMNS", "line_number": 17, "usage_type": "attribute" }, { "api_name": "feature.BokehForText", "line_number": 17, "usage_type": "name" }, { "api_name": "functionality.BokehDataAnnotator.SUBSET_GLYPH_KWARGS", "line_number": 18, "usage_type": "attribute" }, { "api_name": "functionality.BokehDataAnnotator", "line_number": 18, "usage_type": "name" }, { "api_name": "bokeh.layouts.row", "line_number": 25, "usage_type": "call" }, { "api_name": "bokeh.layouts.row", "line_number": 27, "usage_type": "call" }, { "api_name": "bokeh.layouts.column", "line_number": 29, "usage_type": "call" } ]
418054407
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# BabyNames python coding exercise.

# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0

# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

import sys
import re
import argparse


def extract_names(filename):
    names = []
    birth_year = re.findall(r'\d+', filename)[0]
    names.append(birth_year)
    with open(filename, 'r') as file:
        reg_ex = r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>'
        ranked_names = re.findall(reg_ex, file.read())
    for name_tup in ranked_names:
        boy_name = name_tup[1]
        girl_name = name_tup[2]
        name_rank = str(name_tup[0])
        if boy_name not in ' '.join(names):
            names.append(boy_name + ' ' + name_rank)
        if girl_name not in ' '.join(names):
            names.append(girl_name + ' ' + name_rank)
    return sorted(names)


def create_parser():
    parser = argparse.ArgumentParser(
        description="Extracts and alphabetizes baby names from html.")
    parser.add_argument(
        '--summaryfile', help='creates a summary file', action='store_true')
    parser.add_argument('files', help='filename(s) to parse', nargs='+')
    return parser


def main(args):
    # Implement command-line parser
    parser = create_parser()
    ns = parser.parse_args(args)
    if not ns:
        parser.print_usage()
        sys.exit(1)
    file_list = ns.files
    create_summary = ns.summaryfile
    # Get names list for all files supplied and either print to shell or
    # write summary file
    for file in file_list:
        names = '\n'.join(extract_names(file)) + '\n'
        if create_summary:
            output_file = file + '.summary'
            with open(output_file, 'w') as summary_file:
                summary_file.write(names)
        else:
            print(names)


if __name__ == '__main__':
    main(sys.argv[1:])
null
babynames.py
babynames.py
py
1980
python
en
code
null
code-starcoder2
83
[ { "api_name": "re.findall", "line_number": 21, "usage_type": "call" }, { "api_name": "re.findall", "line_number": 26, "usage_type": "call" }, { "api_name": "argparse.ArgumentParser", "line_number": 42, "usage_type": "call" }, { "api_name": "sys.exit", "line_number": 58, "usage_type": "call" }, { "api_name": "sys.argv", "line_number": 76, "usage_type": "attribute" } ]
506055926
import random
import json
from websocket import create_connection
import time
import os
import stomper
from datetime import date
import timeout_decorator
import uuid

mac_address = '_'.join(['{:02x}'.format((uuid.getnode() >> ele) & 0xff)
                        for ele in range(0, 8 * 6, 8)][::-1])
print(mac_address)

# import FakeRPi.GPIO as GPIO
import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.OUT)
GPIO.setup(27, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)

GPIO.output(17, 1)
GPIO.output(27, 1)
GPIO.output(22, 1)

import multiprocessing.pool
import functools

# hostname = "192.168.0.202"
#
# while True:
#     response = os.system("ping -c 1 " + hostname)
#
#     if response == 0:
#         print(hostname, 'Reboot successful!')
#         time.sleep(10)
#         os.system("/usr/bin/chromium-browser --disable-session-crashed-bubble --disable-features=InfiniteSessionRestore --disable-infobars --kiosk http://192.168.0.202:9001/#/signin-token?token=2e981186e7d75f194ca9ef82e2b1f441edb5b42254ea14d3e725b03a1ebb29b7c570af39eefbab78e59588be18e1c6f41756ed0e9c192957dfd3cfce121a5b6b")
#         break
#     else:
#         continue


def Logging(string, response, server):
    line = (string + ' - ' + str(time.strftime("%m/%d/%Y, %H:%M:%S", time.localtime()))
            + " - studenId: " + str(response['studentId'])
            + " - roomId: " + str(response['roomId'])
            + " - shiftCode: " + str(response['shiftCode'])
            + " - nvrId: " + str(response['nvrId'])
            + " - cameraId: " + str(response['cameraId'])
            + " - Server Websocket: " + str(server['server_websocket']))
    print(line)
    f = open('/home/pi/Scripts/Logs/' + date.today().strftime("%d-%m-%y") + '.log', 'a')  # Write one log file per day
    f.writelines(line + "\n")
    f.close()


# Shorter, more compact log entry
def Log1(string, response, server):
    line = (string + ' - ' + str(time.strftime("%m/%d/%Y, %H:%M:%S", time.localtime()))
            + " - studenId: " + str(response['studentId'])
            + " - roomId: " + str(response['roomId'])
            + " - shiftCode: " + str(response['shiftCode'])
            + " - nvrId: " + str(response['nvrId'])
            + " - cameraId: " + str(response['cameraId']))
    print(line)
    f = open('/home/pi/Scripts/Logs/' + date.today().strftime("%d-%m-%y") + '.log', 'a')  # Write one log file per day
    f.writelines(line + "\n")
    f.close()


def Log2(string):
    line = (string)
    print(line)
    # f = open('/home/pi/Scripts/Logs/' + date.today().strftime("%d-%m-%y") + '.log', 'a')  # Write one log file per day
    # f.writelines(line + "\n")
    # f.close()


def Connect(server):
    Log2("Connecting SmartAccess: " + server['server_websocket'] + "....")
    ws = create_connection("ws://" + server['server_websocket'])
    sub = stomper.subscribe(server['topic1'], str(random.randint(0, 1000)), ack='auto')
    ws.send(sub)
    sub = stomper.subscribe(server['topic2'], str(random.randint(0, 1000)), ack='auto')
    ws.send(sub)
    Log2("--------------- Connect Successfully!")
    return ws


def Receive(ws):
    Log2("+ Waiting for data ....")
    response = str(ws.recv())
    return json.loads(response[response.find("{"):-1])


def Open_door(response, server):
    GPIO.output(17, 0)
    GPIO.output(27, 0)
    GPIO.output(22, 0)
    Logging("-- OPENED DOOR", response, server)


def Close_door(response, server):
    GPIO.output(17, 1)
    GPIO.output(27, 1)
    GPIO.output(22, 1)
    Log2("-- CLOSED DOOR")


def Get_config_file():
    while True:
        try:
            with open('/home/pi/Scripts/server.txt') as f:
                server = json.load(f)
            with open('/home/pi/Scripts/dc_a6_32_1a_88_39.txt') as f:
                data = json.load(f)
            return server, data
        except:
            Log2("Updating config file...")
            continue


if __name__ == '__main__':
    server, data = old_server, old_data = Get_config_file()
    while True:
        try:
            ws = Connect(server)
            break
        except:
            time.sleep(int(data['wait_error']))
            continue

    def Open_Check(ws, response, data, server):
        print('Data received: cameraId: ' + response['cameraId'] + ' - shiftCode: ' + str(response['shiftCode']))
        if response['shiftCode'] == None or 1:  # NOTE: 'or 1' makes this condition always true
            if response['cameraId'] in data['cameraId'].replace(" ", "").split(","):
                Open_door(response, server)
                start = time.time()
                person = True
                while person:
                    try:
                        person = Check(ws)
                    except:
                        person = False
                print("Total time open door: ", time.time() - start, " seconds")

    while True:
        server, data = Get_config_file()

        @timeout_decorator.timeout(int(data['door_open']))
        def Check(ws):
            print("Wait " + data['door_open'] + "sec for data ....")
            response = str(ws.recv())
            response = json.loads(response[response.find("{"):-1])
            if response['shiftCode'] == None or 1:
                if response['cameraId'] in data['cameraId'].replace(" ", "").split(","):
                    Log1("More 5sec for: ", response, server)
                    return True
            return False

        @timeout_decorator.timeout(int(data['safe_time']))
        def Safe():
            Log2("Safe-close period of " + data['safe_time'] + "s starting ....")
            while True:
                response = str(ws.recv())
                response = json.loads(response[response.find("{"):-1])
                if response['shiftCode'] == None or 1:  # original key 'shift Code' (with a space) was a typo
                    if response['cameraId'] in data['cameraId'].replace(" ", "").split(","):
                        Log1('Safe close, not opening: ', response, server)

        # If the door was not opened, re-establish the connection after 5min * 60sec = 300sec
        @timeout_decorator.timeout(int(data['update_config']))
        def main(ws, wss):
            try:
                response = Receive(wss)
            except:
                ws = Connect(server)
                response = Receive(ws)
            Open_Check(ws, response, data, server)
            Close_door(response, server)
            try:
                Safe()
            except:
                print("* * *")

        if server == old_server and data == old_data:
            try:
                main(ws, ws)
            except:
                Log2(" >>> Reload the config after 5 minutes")
        else:
            old_server, old_data = server, data  # Remember the new config values to compare against next time
            ws = Connect(server)
null
Scripts/ServiceScript.py
ServiceScript.py
py
6779
python
en
code
null
code-starcoder2
83
[ { "api_name": "uuid.getnode", "line_number": 10, "usage_type": "call" }, { "api_name": "RPi.GPIO.setmode", "line_number": 16, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 16, "usage_type": "name" }, { "api_name": "RPi.GPIO.BCM", "line_number": 16, "usage_type": "attribute" }, { "api_name": "RPi.GPIO.setup", "line_number": 17, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 17, "usage_type": "name" }, { "api_name": "RPi.GPIO.OUT", "line_number": 17, "usage_type": "attribute" }, { "api_name": "RPi.GPIO.setup", "line_number": 18, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 18, "usage_type": "name" }, { "api_name": "RPi.GPIO.OUT", "line_number": 18, "usage_type": "attribute" }, { "api_name": "RPi.GPIO.setup", "line_number": 19, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 19, "usage_type": "name" }, { "api_name": "RPi.GPIO.OUT", "line_number": 19, "usage_type": "attribute" }, { "api_name": "RPi.GPIO.output", "line_number": 21, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 21, "usage_type": "name" }, { "api_name": "RPi.GPIO.output", "line_number": 22, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 22, "usage_type": "name" }, { "api_name": "RPi.GPIO.output", "line_number": 23, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 23, "usage_type": "name" }, { "api_name": "time.strftime", "line_number": 43, "usage_type": "call" }, { "api_name": "time.localtime", "line_number": 43, "usage_type": "call" }, { "api_name": "datetime.date.today", "line_number": 51, "usage_type": "call" }, { "api_name": "datetime.date", "line_number": 51, "usage_type": "name" }, { "api_name": "time.strftime", "line_number": 57, "usage_type": "call" }, { "api_name": "time.localtime", "line_number": 57, "usage_type": "call" }, { "api_name": "datetime.date.today", "line_number": 64, "usage_type": "call" }, { "api_name": "datetime.date", "line_number": 64, "usage_type": "name" }, { "api_name": "websocket.create_connection", "line_number": 78, "usage_type": "call" }, { "api_name": "stomper.subscribe", "line_number": 79, "usage_type": "call" }, { "api_name": "random.randint", "line_number": 79, "usage_type": "call" }, { "api_name": "stomper.subscribe", "line_number": 81, "usage_type": "call" }, { "api_name": "random.randint", "line_number": 81, "usage_type": "call" }, { "api_name": "json.loads", "line_number": 89, "usage_type": "call" }, { "api_name": "RPi.GPIO.output", "line_number": 92, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 92, "usage_type": "name" }, { "api_name": "RPi.GPIO.output", "line_number": 93, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 93, "usage_type": "name" }, { "api_name": "RPi.GPIO.output", "line_number": 94, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 94, "usage_type": "name" }, { "api_name": "RPi.GPIO.output", "line_number": 98, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 98, "usage_type": "name" }, { "api_name": "RPi.GPIO.output", "line_number": 99, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 99, "usage_type": "name" }, { "api_name": "RPi.GPIO.output", "line_number": 100, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 100, "usage_type": "name" }, { "api_name": "json.load", "line_number": 108, "usage_type": "call" }, { "api_name": "json.load", "line_number": 110, "usage_type": "call" }, { "api_name": "time.sleep", "line_number": 125, "usage_type": "call" 
}, { "api_name": "time.time", "line_number": 134, "usage_type": "call" }, { "api_name": "time.time", "line_number": 142, "usage_type": "call" }, { "api_name": "json.loads", "line_number": 151, "usage_type": "call" }, { "api_name": "timeout_decorator.timeout", "line_number": 147, "usage_type": "call" }, { "api_name": "json.loads", "line_number": 163, "usage_type": "call" }, { "api_name": "timeout_decorator.timeout", "line_number": 158, "usage_type": "call" }, { "api_name": "timeout_decorator.timeout", "line_number": 168, "usage_type": "call" } ]
434556586
from functools import partial import torch from torch import nn as nn from torch.nn import functional as F def conv3d(in_channels, out_channels, kernel_size, bias, padding): """ Create convolution layer Inputs: :param in_channels: :param out_channels: :param kernel_size: :param bias: :param padding: Return : a 3d convolution """ return nn.Conv3d(in_channels, out_channels, kernel_size, padding=padding, bias=bias) def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding): """ Create single convolution block with non-linearity and optional batchnorm/groupnorm. Input: :param in_channels (int): number of input channels :param out_channels (int): number of output channels :param kernel_size (int or tuple): size of the convolving kernel :param order (string): order of the layers in the convolution block 'cr' -> conv + ReLU 'cl' -> conv + LeakyReLU 'gcr' -> groupnorm + conv + ReLU 'bcr' -> batchnorm + conv + ReLU :param num_groups (int): number of groups for the GroupNorm :param padding (int or tuple): add zero-padding added to all three sides of the input Return: list of tuple (name, module) """ assert 'c' in order, "Conv layer MUST be present" assert order[0] not in 'rl', 'Non-linearity cannot be the first operation in the layer' # Build up the convolution block modules = [] for i, char in enumerate(order): # Append layers accoring to the order of the chars # add ReLU activation layer if char == 'r': modules.append(('ReLU', nn.ReLU(inplace=True))) # add LeakyReLU activation layer elif char == 'l': modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True))) # add convolution layer elif char == 'c': # add learnable bias only in the absence of batchnorm/groupnorm bias = not ('g' in order or 'b' in order) modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding))) # add gourp normalization (better in small batches) elif char == 'g': is_before_conv = i < order.index('c') if is_before_conv: num_channels = in_channels else: num_channels = out_channels # use only one group if the given number of groups is greater than the number of channels if num_channels < num_groups: num_groups = 1 assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}' modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=num_channels))) # add batch normalization (better in bigger batches) elif char == 'b': is_before_conv = i < order.index('c') if is_before_conv: modules.append(('batchnorm', nn.BatchNorm3d(in_channels))) else: modules.append(('batchnorm', nn.BatchNorm3d(out_channels))) # if the order does not include these layers else: raise ValueError(f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']") return modules class SingleConv(nn.Sequential): """ Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. 
    Inputs:
        :param in_channels (int): number of input channels
        :param out_channels (int): number of output channels
        :param kernel_size (int or tuple): size of the convolving kernel
        :param order (string): order of the layers in the convolution block
            'cr'  -> conv + ReLU
            'cl'  -> conv + LeakyReLU
            'gcr' -> groupnorm + conv + ReLU
            'bcr' -> batchnorm + conv + ReLU
        :param num_groups (int): number of groups for the GroupNorm
        :param padding (int or tuple): add zero-padding added to all three sides of the input
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, order='gcr', num_groups=8, padding=1):
        super(SingleConv, self).__init__()

        for name, module in create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=padding):
            self.add_module(name, module)


class DoubleConv(nn.Sequential):
    """
    Class for two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d).
    We use (Conv3d+ReLU+GroupNorm3d) by default; this can be changed by providing
    the order argument, e.g. to switch to Conv3d+BatchNorm3d+ELU use order='cbe'.

    Inputs:
        :param in_channels (int): number of input channels
        :param out_channels (int): number of output channels
        :param encoder (bool): if True we're in the encoder path, otherwise we're in the decoder
        :param kernel_size (int or tuple): size of the convolving kernel
        :param order (string): order of the layers in the convolution block
            'cr'  -> conv + ReLU
            'cl'  -> conv + LeakyReLU
            'gcr' -> groupnorm + conv + ReLU
            'bcr' -> batchnorm + conv + ReLU
        :param num_groups (int): number of groups for the GroupNorm
        :param padding (int or tuple): add zero-padding added to all three sides of the input
    """

    def __init__(self, in_channels, out_channels, encoder, kernel_size=3, order='gcr', num_groups=8, padding=1):
        super(DoubleConv, self).__init__()
        if encoder:
            # ENCODER BLOCK
            conv1_in_channels = in_channels
            conv1_out_channels = out_channels // 2
            if conv1_out_channels < in_channels:
                conv1_out_channels = in_channels
            conv2_in_channels, conv2_out_channels = conv1_out_channels, out_channels
        else:
            # DECODER BLOCK (decrease the number of channels in the 1st convolution)
            conv1_in_channels, conv1_out_channels = in_channels, out_channels
            conv2_in_channels, conv2_out_channels = out_channels, out_channels

        # conv1
        self.add_module('SingleConv1',
                        SingleConv(conv1_in_channels, conv1_out_channels, kernel_size, order, num_groups,
                                   padding=padding))
        # conv2
        self.add_module('SingleConv2',
                        SingleConv(conv2_in_channels, conv2_out_channels, kernel_size, order, num_groups,
                                   padding=padding))


class Encoder(nn.Module):
    """
    Single ENCODER module consisting of the optional max/avg pooling layer
    followed by a basic module (DoubleConv).

    Inputs:
        :param in_channels (int): number of input channels
        :param out_channels (int): number of output channels
        :param conv_kernel_size (int or tuple): size of the convolving kernel
        :param apply_pooling (bool): if True use MaxPool3d before DoubleConv
        :param pool_kernel_size (int or tuple): the size of the window
        :param pool_type (str): pooling layer: 'max' or 'avg'
        :param conv_layer_order (string): determines the order of layers
        :param num_groups (int): number of groups for the GroupNorm
        :param padding (int or tuple): add zero-padding added to all three sides of the input
    """

    def __init__(self, in_channels, out_channels, conv_kernel_size=3, apply_pooling=True,
                 pool_kernel_size=2, pool_type='max', conv_layer_order='gcr', num_groups=2, padding=1):
        super(Encoder, self).__init__()
        assert pool_type in ['max', 'avg']

        # Pooling layer
        if apply_pooling:
            if pool_type == 'max':
                self.pooling = nn.MaxPool3d(kernel_size=pool_kernel_size)
            else:
                self.pooling = nn.AvgPool3d(kernel_size=pool_kernel_size)
        else:
            self.pooling = None

        # Encoder module
        self.basic_module = DoubleConv(in_channels, out_channels,
                                       encoder=True,
                                       kernel_size=conv_kernel_size,
                                       order=conv_layer_order,
                                       num_groups=num_groups,
                                       padding=padding)

    def forward(self, x):
        if self.pooling is not None:
            x = self.pooling(x)
        x = self.basic_module(x)
        return x


class Decoder(nn.Module):
    """
    One level of the decoder module with the upsampling layer followed by a basic module (DoubleConv).

    Input:
        :param in_channels (int): number of input channels
        :param out_channels (int): number of output channels
        :param conv_kernel_size (int or tuple): size of the convolving kernel
        :param scale_factor (tuple): used as the multiplier for the image H/W/D in case of nn.Upsample
            or as stride in case of ConvTranspose3d; must reverse the MaxPool3d operation
            from the corresponding encoder
        :param conv_layer_order (string): determines the order of layers
        :param num_groups (int): number of groups for the GroupNorm
        :param mode (string): algorithm used for upsampling; the options are the following...
            'nearest' | 'linear' | 'bilinear' | 'trilinear' | 'area'
        :param padding (int or tuple): add zero-padding added to all three sides of the input
    """

    def __init__(self, in_channels, out_channels, conv_kernel_size=3,
                 conv_layer_order='gcr', num_groups=8, mode='nearest', padding=1):
        super(Decoder, self).__init__()

        # interpolation for upsampling and concatenation joining
        self.upsampling = Upsampling(mode=mode)
        # concat joining
        self.joining = partial(self._joining, concat=True)

        self.basic_module = DoubleConv(in_channels, out_channels,
                                       encoder=False,
                                       kernel_size=conv_kernel_size,
                                       order=conv_layer_order,
                                       num_groups=num_groups,
                                       padding=padding)

    def forward(self, encoder_features, x):
        x = self.upsampling(encoder_features=encoder_features, x=x)
        x = self.joining(encoder_features, x)
        x = self.basic_module(x)
        return x

    @staticmethod
    def _joining(encoder_features, x, concat):
        if concat:
            return torch.cat((encoder_features, x), dim=1)
        else:
            return encoder_features + x


class Upsampling(nn.Module):
    """
    Upsamples a given multi-channel 3D data using interpolation.

    Input:
        :param mode (string): algorithm used for upsampling; the options are the following...
            'nearest' | 'linear' | 'bilinear' | 'trilinear' | 'area'
    """

    def __init__(self, mode='nearest'):
        super(Upsampling, self).__init__()
        self.upsample = partial(self._interpolate, mode=mode)

    def forward(self, encoder_features, x):
        output_size = encoder_features.size()[2:]
        return self.upsample(x, output_size)

    @staticmethod
    def _interpolate(x, size, mode):
        return F.interpolate(x, size=size, mode=mode)
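
# Usage sketch (illustrative; channel counts, group counts and input shapes
# below are assumptions, not values taken from this file):
#
#   enc = Encoder(in_channels=16, out_channels=32, apply_pooling=False, num_groups=8)
#   x = torch.randn(1, 16, 32, 64, 64)    # (batch, channels, D, H, W)
#   features = enc(x)                      # -> (1, 32, 32, 64, 64)
#
#   dec = Decoder(in_channels=32 + 64, out_channels=32)
#   deeper = torch.randn(1, 64, 16, 32, 32)
#   out = dec(features, deeper)            # upsample deeper, concat, DoubleConv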
null
model/layers.py
layers.py
py
11,548
python
en
code
null
code-starcoder2
83
[ { "api_name": "torch.nn.Conv3d", "line_number": 20, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 20, "usage_type": "name" }, { "api_name": "torch.nn.ReLU", "line_number": 51, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 51, "usage_type": "name" }, { "api_name": "torch.nn.LeakyReLU", "line_number": 55, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 55, "usage_type": "name" }, { "api_name": "torch.nn.GroupNorm", "line_number": 75, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 75, "usage_type": "name" }, { "api_name": "torch.nn.BatchNorm3d", "line_number": 81, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 81, "usage_type": "name" }, { "api_name": "torch.nn.BatchNorm3d", "line_number": 83, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 83, "usage_type": "name" }, { "api_name": "torch.nn.Sequential", "line_number": 92, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 92, "usage_type": "name" }, { "api_name": "torch.nn.Sequential", "line_number": 117, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 117, "usage_type": "name" }, { "api_name": "torch.nn.Module", "line_number": 169, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 169, "usage_type": "name" }, { "api_name": "torch.nn.MaxPool3d", "line_number": 196, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 196, "usage_type": "name" }, { "api_name": "torch.nn.AvgPool3d", "line_number": 198, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 198, "usage_type": "name" }, { "api_name": "torch.nn.Module", "line_number": 217, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 217, "usage_type": "name" }, { "api_name": "functools.partial", "line_number": 245, "usage_type": "call" }, { "api_name": "torch.cat", "line_number": 263, "usage_type": "call" }, { "api_name": "torch.nn.Module", "line_number": 268, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 268, "usage_type": "name" }, { "api_name": "functools.partial", "line_number": 280, "usage_type": "call" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 288, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 288, "usage_type": "name" } ]
258592833
#!/usr/bin/env python
import pystache
import os
import json
import codecs
import HTMLParser

API_DIR = "apis"
OUT_DIR = "aws-sdk"


def parse_operations(iterator_fn):
    operations = []
    for op_name, operation in iterator_fn():
        if operation.get("deprecated") is not True:
            doc = operation.get("documentation", " ")
            operation.update({"documentation": doc})
            inp = operation.get("input")
            if inp:
                operation.update({"input_shape_or_empty": inp})
            else:
                operation.update({"input_shape_or_empty": {"shape": "{}"}})
            operations.append(operation)
    return operations


def parse_shapes(iterator_fn):
    shapes = {
        "string": [], "structure": [], "list": [], "double": [], "integer": [],
        "long": [], "boolean": [], "map": [], "blob": [], "timestamp": [], "float": []}
    for shape_name, shape in iterator_fn():
        shape_type = str(shape["type"])
        shape["name"] = shape_name
        # ensure empty documentation if none exists (otherwise mustache will start searching recursively)
        shape.update({"documentation": shape.get("documentation", " ")})
        # remove patterns
        if shape.get("pattern"):
            shape.pop("pattern", None)
        if shape_type == "structure":
            members = []
            query_string_members = []
            uri_members = []
            header_members = []
            for member_name, member in shape["members"].iteritems():
                member["name"] = member_name
                member["locationName"] = member.get("locationName", member_name)
                member["documentation"] = member.get("documentation", "").replace("\n", "")
                if member.get("location") == "querystring":
                    query_string_members.append(member)
                elif member.get("location") == "uri":
                    uri_members.append(member)
                elif member.get("location") == "header":
                    header_members.append(member)
                members.append(member)
            shape["member_names"] = ",".join(shape["members"].keys())
            shape["members"] = members
            shape["query_string_members"] = query_string_members
            shape["uri_members"] = uri_members
            shape["header_members"] = header_members
        if shape.get("required"):
            required = []
            for required_name in shape["required"]:
                required.append({"name": required_name})
            shape.update({"required": required})
        shapes[shape_type].append(shape)
    return shapes


def generate():
    with open("operations.mtl", 'r') as f:
        op_mtl = f.read()

    for filename in os.listdir(API_DIR):
        if filename.endswith(".normal.json"):
            print("Parsing " + filename)
            j = json.load(open(os.path.join(API_DIR, filename)))
            j["operations"] = parse_operations(j["operations"].iteritems)
            j["shapes"] = parse_shapes(j["shapes"].iteritems)
            out_filename = j["metadata"]["endpointPrefix"].replace(".", "_") + ".lua"
            print("Writing " + out_filename)
            lua = pystache.render(op_mtl, j)
            with codecs.open(os.path.join(OUT_DIR, out_filename), 'wb', "utf-8") as f:
                f.write(HTMLParser.HTMLParser().unescape(lua))


if __name__ == "__main__":
    generate()
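
# Usage sketch (workflow inferred from the constants above; "botocore-style"
# is an assumption about where the *.normal.json definitions come from):
#
#   $ ls apis/            # service definitions named <service>.normal.json
#   $ python generate.py  # renders operations.mtl, writes aws-sdk/<service>.lua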
null
generate.py
generate.py
py
3,533
python
en
code
null
code-starcoder2
83
[ { "api_name": "os.listdir", "line_number": 85, "usage_type": "call" }, { "api_name": "json.load", "line_number": 88, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 88, "usage_type": "call" }, { "api_name": "os.path", "line_number": 88, "usage_type": "attribute" }, { "api_name": "pystache.render", "line_number": 96, "usage_type": "call" }, { "api_name": "codecs.open", "line_number": 97, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 97, "usage_type": "call" }, { "api_name": "os.path", "line_number": 97, "usage_type": "attribute" }, { "api_name": "HTMLParser.HTMLParser", "line_number": 98, "usage_type": "call" } ]
368755387
import numpy
import os
import shutil

import av


def load_video(
        path=os.path.expanduser('/data/lisatmp4/dejoieti/data_colo/polyp_video_frames'),
        crop=False, data_augment=False, split=[.5, .25],
        normalize=False, whiten=False, rng=None, nImg=None):

    im_path = os.path.join(path, 'Original',)
    mask_path = os.path.join(path, 'Ground Truth',)

    training_set_path = "/data/lisatmp4/dejoieti/data_colo/TrainingSet"
    for video_index, test_directory in enumerate(os.listdir(training_set_path)):
        directory_path = os.path.join(training_set_path, test_directory)
        if os.path.isdir(directory_path):
            # copy the ground-truth masks, renamed as <video>_<frame>.tiff
            image_frames = filter(lambda l: l.endswith(".tiff"),
                                  os.listdir(os.path.join(directory_path, "GT")))
            image_frames = list(map(lambda l: os.path.join(directory_path, "GT", l), image_frames))
            for frame_index, frame in enumerate(image_frames):
                shutil.copyfile(frame,
                                os.path.join(mask_path, str(video_index) + "_" + str(frame_index) + ".tiff"))

            # decode the video and dump every frame with the same naming scheme
            video_file_path = list(filter(lambda l: l.endswith(".wmv"), os.listdir(directory_path)))[0]
            video_file_path = os.path.join(directory_path, video_file_path)
            container = av.open(video_file_path)

            # number_of_frames_in_video = 0
            frame_index = 0
            for packet in container.demux():
                for frame in packet.decode():
                    if type(frame) == av.video.frame.VideoFrame:
                        frame.to_image().save(
                            os.path.join(im_path, str(video_index) + "_" + str(frame_index) + ".tiff"))
                        frame_index += 1
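
# Usage sketch (illustrative; the hard-coded cluster paths above must exist):
#
#   load_video()  # copies GT masks into 'Ground Truth/' and dumps every
#                 # decoded video frame into 'Original/' as <video>_<frame>.tiff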
null
utils/VideoReading.py
VideoReading.py
py
1,793
python
en
code
null
code-starcoder2
83
[ { "api_name": "os.path.expanduser", "line_number": 7, "usage_type": "call" }, { "api_name": "os.path", "line_number": 7, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 17, "usage_type": "call" }, { "api_name": "os.path", "line_number": 17, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 18, "usage_type": "call" }, { "api_name": "os.path", "line_number": 18, "usage_type": "attribute" }, { "api_name": "os.listdir", "line_number": 22, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 23, "usage_type": "call" }, { "api_name": "os.path", "line_number": 23, "usage_type": "attribute" }, { "api_name": "os.path.isdir", "line_number": 24, "usage_type": "call" }, { "api_name": "os.path", "line_number": 24, "usage_type": "attribute" }, { "api_name": "os.listdir", "line_number": 26, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 26, "usage_type": "call" }, { "api_name": "os.path", "line_number": 26, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 27, "usage_type": "call" }, { "api_name": "os.path", "line_number": 27, "usage_type": "attribute" }, { "api_name": "shutil.copyfile", "line_number": 31, "usage_type": "call" }, { "api_name": "os.listdir", "line_number": 36, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 37, "usage_type": "call" }, { "api_name": "os.path", "line_number": 37, "usage_type": "attribute" }, { "api_name": "av.open", "line_number": 38, "usage_type": "call" }, { "api_name": "av.video", "line_number": 44, "usage_type": "attribute" } ]
466058833
#!/usr/bin/python

import urllib2
import xml.etree.ElementTree as ElementTree
import re


def refine_table(table):
    result = table
    result = re.sub(r"<td.*?>", "<td>", result)
    result = re.sub(r"<tr.*?>", "<tr>", result)
    result = re.sub(r"<a.*?>(.*?)</a>", "\\1", result)
    result = re.sub(r"<span.*?>(.*?)</span>", "\\1", result)
    result = re.sub(r"<b.*?>(.*?)</b>", "\\1", result)
    result = re.sub(r"<br\s?/>", "", result)
    result = re.sub(r"<sup.*?/sup>", "", result)
    result = re.sub(r"<sub.*?/sub>", "", result)
    result = re.sub(r"<caption.*?/caption>", "", result)
    result = re.sub(r"</abbr>", "", result)
    result = re.sub(r"\n", "", result)
    # flags must be passed by keyword: the 4th positional argument of re.sub is count
    result = re.sub(r"<div.*?>.*?<li>(.*?)</li>.*?</div>", "\\1", result, flags=re.M | re.S)
    return result


def main():
    response = urllib2.urlopen("https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers")
    content = response.read()

    tables = re.findall(r"<table class=\"wikitable sortable collapsible\">(.*?)</table>", content, re.M | re.S)
    table_well_known = refine_table(tables[0])
    table_registered_known = refine_table(tables[1])
    whole_table = "<table>" + table_well_known + table_registered_known + "</table>"

    tree = ElementTree.fromstring(whole_table)
    port_info = {}
    for child in tree:
        port = child[0].text
        tcp = child[1].text
        udp = child[2].text
        desc = (child[3][0].text if len(child[3]) > 0 else child[3].text).replace(",", "").replace(".", "")

        # skip invalid entries
        if not port:
            continue
        if ("Reserved" in [tcp, udp]) or ("N/A" in [tcp, udp]):
            continue

        # defaulting to TCP
        if (not tcp and not udp):
            tcp = "TCP"
        elif tcp and tcp.lower() in ["yes", "assigned", "?"]:
            tcp = "TCP"
        # mark UDP support when the UDP column is affirmative
        if udp and udp.lower() in ["yes", "assigned", "?"]:
            udp = "UDP"

        # check if given is a port range
        try:
            port_range = [int(port)] if port.isdigit() else map(int, port.split("-"))
        except:
            continue

        for p in port_range:
            if p not in port_info:
                port_info[p] = [set(), []]
            if tcp == "TCP":
                port_info[p][0].add("tcp")
            if udp == "UDP":
                port_info[p][0].add("udp")
            port_info[p][1].append(desc)

    with open("services.list", "w") as fsvcs:
        for port, info in sorted(port_info.items()):
            for proto in sorted(info[0]):
                svc = (" | ".join(info[1])).replace(u"\u00e9", "e").replace(u"\u2013", "-").replace(u"\u2014", "-")
                fsvcs.write(("%s,%s,%s" % (proto, port, svc)))
                fsvcs.write("\n")


if __name__ == "__main__":
    main()
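
# Output sketch: services.list ends up with one "<proto>,<port>,<description>"
# line per protocol/port pair, e.g. (illustrative rows, not actual scraped data):
#
#   tcp,22,Secure Shell (SSH)
#   udp,53,Domain Name System (DNS)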
null
MMLanScan/Data/build_port_services_list.py
build_port_services_list.py
py
2,845
python
en
code
null
code-starcoder2
83
[ { "api_name": "re.sub", "line_number": 9, "usage_type": "call" }, { "api_name": "re.sub", "line_number": 10, "usage_type": "call" }, { "api_name": "re.sub", "line_number": 11, "usage_type": "call" }, { "api_name": "re.sub", "line_number": 12, "usage_type": "call" }, { "api_name": "re.sub", "line_number": 13, "usage_type": "call" }, { "api_name": "re.sub", "line_number": 14, "usage_type": "call" }, { "api_name": "re.sub", "line_number": 15, "usage_type": "call" }, { "api_name": "re.sub", "line_number": 16, "usage_type": "call" }, { "api_name": "re.sub", "line_number": 17, "usage_type": "call" }, { "api_name": "re.sub", "line_number": 18, "usage_type": "call" }, { "api_name": "re.sub", "line_number": 19, "usage_type": "call" }, { "api_name": "re.sub", "line_number": 20, "usage_type": "call" }, { "api_name": "re.M", "line_number": 20, "usage_type": "attribute" }, { "api_name": "re.S", "line_number": 20, "usage_type": "attribute" }, { "api_name": "urllib2.urlopen", "line_number": 26, "usage_type": "call" }, { "api_name": "re.findall", "line_number": 29, "usage_type": "call" }, { "api_name": "re.M", "line_number": 29, "usage_type": "attribute" }, { "api_name": "re.S", "line_number": 29, "usage_type": "attribute" }, { "api_name": "xml.etree.ElementTree.fromstring", "line_number": 34, "usage_type": "call" }, { "api_name": "xml.etree.ElementTree", "line_number": 34, "usage_type": "name" } ]
413003560
from datetime import datetime, timedelta
from decimal import Decimal

from casexml.apps.stock.models import StockTransaction, StockReport
from corehq.apps.commtrack.models import StockState
from corehq.apps.locations.tests.util import make_loc
from corehq.apps.products.models import SQLProduct, Product
from corehq.apps.sms.models import SMS
from corehq.apps.sms.tests.util import setup_default_sms_test_backend, delete_domain_phone_numbers
from custom.ewsghana.models import FacilityInCharge, EWSExtension
from custom.ewsghana.reminders import STOCK_ON_HAND_REMINDER, SECOND_STOCK_ON_HAND_REMINDER, \
    SECOND_INCOMPLETE_SOH_REMINDER, STOCKOUT_REPORT, THIRD_STOCK_ON_HAND_REMINDER, INCOMPLETE_SOH_TO_SUPER
from custom.ewsghana.reminders.second_soh_reminder import SecondSOHReminder
from custom.ewsghana.tasks import first_soh_reminder, second_soh_reminder, third_soh_to_super, \
    stockout_notification_to_web_supers, reminder_to_visit_website, reminder_to_submit_rrirv
from custom.ewsghana.tests.handlers.utils import EWSTestCase
from custom.ewsghana.utils import prepare_domain, bootstrap_user, bootstrap_web_user, \
    set_sms_notifications, user_needs_reminders

TEST_DOMAIN = 'ews-reminders-test-domain'


def create_stock_report(location, products_quantities, date=None):
    date = date or datetime.utcnow()
    sql_location = location.sql_location
    report = StockReport.objects.create(
        form_id='ews-reminders-test',
        domain=sql_location.domain,
        type='balance',
        date=date,
        server_date=date
    )
    for product_code, quantity in products_quantities.iteritems():
        StockTransaction(
            stock_on_hand=Decimal(quantity),
            report=report,
            type='stockonhand',
            section_id='stock',
            case_id=sql_location.supply_point_id,
            product_id=SQLProduct.objects.get(domain=sql_location.domain, code=product_code).product_id
        ).save()


class TestReminders(EWSTestCase):

    @classmethod
    def setUpClass(cls):
        super(TestReminders, cls).setUpClass()
        cls.backend, cls.sms_backend_mapping = setup_default_sms_test_backend()
        cls.domain = prepare_domain(TEST_DOMAIN)
        cls.loc1 = make_loc(code="garms", name="Test RMS", type="Regional Medical Store", domain=TEST_DOMAIN)
        cls.loc2 = make_loc(code="tf", name="Test Facility", type="Hospital", domain=TEST_DOMAIN)
        cls.region = make_loc(code="region", name="Test Region", type="region", domain=TEST_DOMAIN)

        cls.user1 = bootstrap_user(
            username='test1',
            phone_number='1111',
            home_loc=cls.loc2,
            domain=TEST_DOMAIN,
            first_name='test',
            last_name='test1',
            user_data={
                'role': []
            }
        )
        cls.user2 = bootstrap_user(
            username='test2',
            phone_number='2222',
            home_loc=cls.loc1,
            domain=TEST_DOMAIN,
            first_name='test',
            last_name='test2',
            user_data={
                'role': ['Other'],
                'needs_reminders': "False"
            }
        )
        cls.user3 = bootstrap_user(
            username='test3',
            phone_number='3333',
            home_loc=cls.loc2,
            domain=TEST_DOMAIN,
            first_name='test',
            last_name='test3',
            user_data={
                'role': ['Nurse'],
                'needs_reminders': "True"
            }
        )
        cls.in_charge = bootstrap_user(
            username='test4',
            phone_number='4444',
            home_loc=cls.loc2,
            domain=TEST_DOMAIN,
            first_name='test',
            last_name='test4',
            user_data={
                'role': ['In Charge']
            }
        )
        cls.web_user = bootstrap_web_user(
            domain=TEST_DOMAIN,
            username='testwebuser',
            password='dummy',
            email='[email protected]',
            location=cls.loc2,
            phone_number='5555'
        )
        EWSExtension.objects.create(
            domain=TEST_DOMAIN,
            user_id=cls.web_user.get_id,
            sms_notifications=True,
            location_id=cls.loc2.get_id
        )
        cls.web_user2 = bootstrap_web_user(
            domain=TEST_DOMAIN,
            username='testwebuser2',
            password='dummy',
            email='[email protected]',
            location=cls.region,
            phone_number='6666'
        )
        set_sms_notifications(TEST_DOMAIN, cls.web_user2, True)

        FacilityInCharge.objects.create(
            user_id=cls.in_charge.get_id,
            location=cls.loc2.sql_location
        )
        cls.product = Product(
            domain=TEST_DOMAIN,
            name='Test Product',
            code_='tp',
            unit='each'
        )
        cls.product.save()
        cls.product2 = Product(
            domain=TEST_DOMAIN,
            name='Test Product2',
            code_='tp2',
            unit='each'
        )
        cls.product2.save()

        sql_product = SQLProduct.objects.get(product_id=cls.product.get_id)
        sql_product2 = SQLProduct.objects.get(product_id=cls.product2.get_id)

        sql_location1 = cls.loc1.sql_location
        sql_location2 = cls.loc2.sql_location
        sql_location1.products = [sql_product]
        sql_location2.products = [sql_product, sql_product2]
        sql_location1.save()
        sql_location2.save()

    def tearDown(self):
        SMS.objects.all().delete()
        StockState.objects.all().delete()
        StockReport.objects.all().delete()
        super(TestReminders, self).tearDown()

    @classmethod
    def tearDownClass(cls):
        delete_domain_phone_numbers(TEST_DOMAIN)
        cls.user1.delete()
        cls.user2.delete()
        cls.user3.delete()
        cls.domain.delete()
        FacilityInCharge.objects.all().delete()
        super(TestReminders, cls).tearDownClass()

    def test_needs_reminders_flag(self):
        self.assertFalse('needs_reminders' in self.user1.user_data)
        self.assertFalse(user_needs_reminders(self.user1))

        self.assertEqual(self.user2.user_data['needs_reminders'], 'False')
        self.assertFalse(user_needs_reminders(self.user2))

        self.assertEqual(self.user3.user_data['needs_reminders'], 'True')
        self.assertTrue(user_needs_reminders(self.user3))

    def test_first_soh_reminder(self):
        first_soh_reminder()
        smses = SMS.objects.all()
        self.assertEqual(smses.count(), 1)
        self.assertEqual(
            smses[0].text,
            STOCK_ON_HAND_REMINDER % {'name': self.user3.full_name}
        )

    def test_second_soh_reminder(self):
        second_soh_reminder()
        smses = SMS.objects.all().order_by('-date')
        self.assertEqual(smses.count(), 2)
        self.assertEqual(
            smses[0].text,
            SECOND_STOCK_ON_HAND_REMINDER % {'name': self.user3.full_name}
        )
        self.assertEqual(
            smses[1].text,
            SECOND_STOCK_ON_HAND_REMINDER % {'name': self.user2.full_name}
        )

        create_stock_report(self.loc1, {
            'tp': 100
        })
        now = datetime.utcnow()
        second_soh_reminder()
        smses = smses.filter(date__gte=now)
        self.assertEqual(smses.count(), 1)
        self.assertEqual(
            smses[0].text,
            SECOND_STOCK_ON_HAND_REMINDER % {'name': self.user3.full_name}
        )

        create_stock_report(self.loc2, {
            'tp': 100
        })
        now = datetime.utcnow()
        second_soh_reminder()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 1)
        self.assertEqual(
            smses[0].text,
            SECOND_INCOMPLETE_SOH_REMINDER % {'name': self.user3.full_name, 'products': 'Test Product2'}
        )

        create_stock_report(self.loc2, {
            'tp2': 100
        })
        now = datetime.utcnow()
        SecondSOHReminder(TEST_DOMAIN).send()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 0)

    def test_third_soh_reminder(self):
        third_soh_to_super()
        smses = SMS.objects.all()
        self.assertEqual(smses.count(), 2)
        self.assertEqual(smses[0].text, THIRD_STOCK_ON_HAND_REMINDER % {
            'name': self.web_user2.full_name,
            'facility': self.loc2.name,
        })
        self.assertEqual(smses[1].text, THIRD_STOCK_ON_HAND_REMINDER % {
            'name': self.in_charge.full_name,
            'facility': self.loc2.name,
        })

        create_stock_report(self.loc2, {
            'tp': 100
        })
        now = datetime.utcnow()
        third_soh_to_super()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 2)
        self.assertEqual(
            smses[0].text,
            INCOMPLETE_SOH_TO_SUPER % {
                'name': self.web_user2.full_name,
                'facility': self.loc2.name,
                'products': 'Test Product2'
            }
        )
        self.assertEqual(
            smses[1].text,
            INCOMPLETE_SOH_TO_SUPER % {
                'name': self.in_charge.full_name,
                'facility': self.loc2.name,
                'products': 'Test Product2'
            }
        )

        create_stock_report(self.loc2, {
            'tp2': 100
        })
        now = datetime.utcnow()
        third_soh_to_super()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 0)

    def test_stockout_reminder(self):
        stockout_notification_to_web_supers()
        smses = SMS.objects.all()
        self.assertEqual(smses.count(), 0)

        create_stock_report(
            self.loc2, {
                'tp': 0
            }
        )
        stockout_notification_to_web_supers()
        smses = SMS.objects.all()
        self.assertEqual(smses.count(), 1)
        last_modified_date = StockState.objects.latest('last_modified_date').last_modified_date.strftime('%b %d')
        self.assertEqual(
            smses[0].text,
            STOCKOUT_REPORT % {
                'name': self.web_user.full_name,
                'facility': self.loc2.name,
                'products': 'Test Product',
                'date': last_modified_date
            }
        )

        set_sms_notifications(TEST_DOMAIN, self.web_user, False)
        self.web_user.save()
        now = datetime.utcnow()
        stockout_notification_to_web_supers()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 0)

    def test_rrirv_reminder(self):
        reminder_to_submit_rrirv()
        smses = SMS.objects.all()
        self.assertEqual(smses.count(), 2)

    def test_visit_reminder(self):
        now = datetime.utcnow()
        self.web_user2.last_login = now - timedelta(weeks=1)
        self.web_user2.save()
        reminder_to_visit_website()
        smses = SMS.objects.all()
        self.assertEqual(smses.count(), 0)

        self.web_user2.last_login = now - timedelta(weeks=14)
        self.web_user2.save()
        reminder_to_visit_website()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 1)

        set_sms_notifications(TEST_DOMAIN, self.web_user2, False)
        self.web_user2.save()
        now = datetime.utcnow()
        reminder_to_visit_website()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 0)
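
# Helper usage sketch (illustrative; 'tp' and 'tp2' are the product codes
# bootstrapped in setUpClass above):
#
#   create_stock_report(location, {'tp': 100, 'tp2': 50})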
null
custom/ewsghana/tests/test_reminders.py
test_reminders.py
py
11,416
python
en
code
null
code-starcoder2
83
[ { "api_name": "datetime.datetime.utcnow", "line_number": 26, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 26, "usage_type": "name" }, { "api_name": "casexml.apps.stock.models.StockReport.objects.create", "line_number": 28, "usage_type": "call" }, { "api_name": "casexml.apps.stock.models.StockReport.objects", "line_number": 28, "usage_type": "attribute" }, { "api_name": "casexml.apps.stock.models.StockReport", "line_number": 28, "usage_type": "name" }, { "api_name": "casexml.apps.stock.models.StockTransaction", "line_number": 36, "usage_type": "call" }, { "api_name": "decimal.Decimal", "line_number": 37, "usage_type": "call" }, { "api_name": "corehq.apps.products.models.SQLProduct.objects.get", "line_number": 42, "usage_type": "call" }, { "api_name": "corehq.apps.products.models.SQLProduct.objects", "line_number": 42, "usage_type": "attribute" }, { "api_name": "corehq.apps.products.models.SQLProduct", "line_number": 42, "usage_type": "name" }, { "api_name": "custom.ewsghana.tests.handlers.utils.EWSTestCase", "line_number": 46, "usage_type": "name" }, { "api_name": "corehq.apps.sms.tests.util.setup_default_sms_test_backend", "line_number": 51, "usage_type": "call" }, { "api_name": "custom.ewsghana.utils.prepare_domain", "line_number": 52, "usage_type": "call" }, { "api_name": "corehq.apps.locations.tests.util.make_loc", "line_number": 53, "usage_type": "call" }, { "api_name": "corehq.apps.locations.tests.util.make_loc", "line_number": 54, "usage_type": "call" }, { "api_name": "corehq.apps.locations.tests.util.make_loc", "line_number": 55, "usage_type": "call" }, { "api_name": "custom.ewsghana.utils.bootstrap_user", "line_number": 57, "usage_type": "call" }, { "api_name": "custom.ewsghana.utils.bootstrap_user", "line_number": 64, "usage_type": "call" }, { "api_name": "custom.ewsghana.utils.bootstrap_user", "line_number": 73, "usage_type": "call" }, { "api_name": "custom.ewsghana.utils.bootstrap_user", "line_number": 82, "usage_type": "call" }, { "api_name": "custom.ewsghana.utils.bootstrap_web_user", "line_number": 90, "usage_type": "call" }, { "api_name": "custom.ewsghana.models.EWSExtension.objects.create", "line_number": 99, "usage_type": "call" }, { "api_name": "custom.ewsghana.models.EWSExtension.objects", "line_number": 99, "usage_type": "attribute" }, { "api_name": "custom.ewsghana.models.EWSExtension", "line_number": 99, "usage_type": "name" }, { "api_name": "custom.ewsghana.utils.bootstrap_web_user", "line_number": 106, "usage_type": "call" }, { "api_name": "custom.ewsghana.utils.set_sms_notifications", "line_number": 115, "usage_type": "call" }, { "api_name": "custom.ewsghana.models.FacilityInCharge.objects.create", "line_number": 117, "usage_type": "call" }, { "api_name": "custom.ewsghana.models.FacilityInCharge.objects", "line_number": 117, "usage_type": "attribute" }, { "api_name": "custom.ewsghana.models.FacilityInCharge", "line_number": 117, "usage_type": "name" }, { "api_name": "corehq.apps.products.models.Product", "line_number": 122, "usage_type": "call" }, { "api_name": "corehq.apps.products.models.Product", "line_number": 130, "usage_type": "call" }, { "api_name": "corehq.apps.products.models.SQLProduct.objects.get", "line_number": 138, "usage_type": "call" }, { "api_name": "corehq.apps.products.models.SQLProduct.objects", "line_number": 138, "usage_type": "attribute" }, { "api_name": "corehq.apps.products.models.SQLProduct", "line_number": 138, "usage_type": "name" }, { "api_name": "corehq.apps.products.models.SQLProduct.objects.get", 
"line_number": 139, "usage_type": "call" }, { "api_name": "corehq.apps.products.models.SQLProduct.objects", "line_number": 139, "usage_type": "attribute" }, { "api_name": "corehq.apps.products.models.SQLProduct", "line_number": 139, "usage_type": "name" }, { "api_name": "corehq.apps.sms.models.SMS.objects.all", "line_number": 150, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 150, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 150, "usage_type": "name" }, { "api_name": "corehq.apps.commtrack.models.StockState.objects.all", "line_number": 151, "usage_type": "call" }, { "api_name": "corehq.apps.commtrack.models.StockState.objects", "line_number": 151, "usage_type": "attribute" }, { "api_name": "corehq.apps.commtrack.models.StockState", "line_number": 151, "usage_type": "name" }, { "api_name": "casexml.apps.stock.models.StockReport.objects.all", "line_number": 152, "usage_type": "call" }, { "api_name": "casexml.apps.stock.models.StockReport.objects", "line_number": 152, "usage_type": "attribute" }, { "api_name": "casexml.apps.stock.models.StockReport", "line_number": 152, "usage_type": "name" }, { "api_name": "corehq.apps.sms.tests.util.delete_domain_phone_numbers", "line_number": 157, "usage_type": "call" }, { "api_name": "custom.ewsghana.models.FacilityInCharge.objects.all", "line_number": 162, "usage_type": "call" }, { "api_name": "custom.ewsghana.models.FacilityInCharge.objects", "line_number": 162, "usage_type": "attribute" }, { "api_name": "custom.ewsghana.models.FacilityInCharge", "line_number": 162, "usage_type": "name" }, { "api_name": "custom.ewsghana.utils.user_needs_reminders", "line_number": 168, "usage_type": "call" }, { "api_name": "custom.ewsghana.utils.user_needs_reminders", "line_number": 171, "usage_type": "call" }, { "api_name": "custom.ewsghana.utils.user_needs_reminders", "line_number": 174, "usage_type": "call" }, { "api_name": "custom.ewsghana.tasks.first_soh_reminder", "line_number": 177, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.all", "line_number": 178, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 178, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 178, "usage_type": "name" }, { "api_name": "custom.ewsghana.reminders.STOCK_ON_HAND_REMINDER", "line_number": 183, "usage_type": "name" }, { "api_name": "custom.ewsghana.tasks.second_soh_reminder", "line_number": 187, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.all", "line_number": 188, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 188, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 188, "usage_type": "name" }, { "api_name": "custom.ewsghana.reminders.SECOND_STOCK_ON_HAND_REMINDER", "line_number": 193, "usage_type": "name" }, { "api_name": "custom.ewsghana.reminders.SECOND_STOCK_ON_HAND_REMINDER", "line_number": 198, "usage_type": "name" }, { "api_name": "datetime.datetime.utcnow", "line_number": 205, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 205, "usage_type": "name" }, { "api_name": "custom.ewsghana.tasks.second_soh_reminder", "line_number": 206, "usage_type": "call" }, { "api_name": "custom.ewsghana.reminders.SECOND_STOCK_ON_HAND_REMINDER", "line_number": 212, "usage_type": "name" }, { "api_name": "datetime.datetime.utcnow", "line_number": 218, "usage_type": "call" }, { 
"api_name": "datetime.datetime", "line_number": 218, "usage_type": "name" }, { "api_name": "custom.ewsghana.tasks.second_soh_reminder", "line_number": 219, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.filter", "line_number": 220, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 220, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 220, "usage_type": "name" }, { "api_name": "custom.ewsghana.reminders.SECOND_INCOMPLETE_SOH_REMINDER", "line_number": 224, "usage_type": "name" }, { "api_name": "datetime.datetime.utcnow", "line_number": 230, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 230, "usage_type": "name" }, { "api_name": "custom.ewsghana.reminders.second_soh_reminder.SecondSOHReminder", "line_number": 231, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.filter", "line_number": 232, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 232, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 232, "usage_type": "name" }, { "api_name": "custom.ewsghana.tasks.third_soh_to_super", "line_number": 236, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.all", "line_number": 237, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 237, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 237, "usage_type": "name" }, { "api_name": "custom.ewsghana.reminders.THIRD_STOCK_ON_HAND_REMINDER", "line_number": 240, "usage_type": "name" }, { "api_name": "custom.ewsghana.reminders.THIRD_STOCK_ON_HAND_REMINDER", "line_number": 244, "usage_type": "name" }, { "api_name": "datetime.datetime.utcnow", "line_number": 252, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 252, "usage_type": "name" }, { "api_name": "custom.ewsghana.tasks.third_soh_to_super", "line_number": 253, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.filter", "line_number": 254, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 254, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 254, "usage_type": "name" }, { "api_name": "custom.ewsghana.reminders.INCOMPLETE_SOH_TO_SUPER", "line_number": 258, "usage_type": "name" }, { "api_name": "custom.ewsghana.reminders.INCOMPLETE_SOH_TO_SUPER", "line_number": 266, "usage_type": "name" }, { "api_name": "datetime.datetime.utcnow", "line_number": 276, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 276, "usage_type": "name" }, { "api_name": "custom.ewsghana.tasks.third_soh_to_super", "line_number": 277, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.filter", "line_number": 278, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 278, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 278, "usage_type": "name" }, { "api_name": "custom.ewsghana.tasks.stockout_notification_to_web_supers", "line_number": 282, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.all", "line_number": 283, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 283, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 283, "usage_type": "name" }, { 
"api_name": "custom.ewsghana.tasks.stockout_notification_to_web_supers", "line_number": 292, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.all", "line_number": 293, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 293, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 293, "usage_type": "name" }, { "api_name": "corehq.apps.commtrack.models.StockState.objects.latest", "line_number": 296, "usage_type": "call" }, { "api_name": "corehq.apps.commtrack.models.StockState.objects", "line_number": 296, "usage_type": "attribute" }, { "api_name": "corehq.apps.commtrack.models.StockState", "line_number": 296, "usage_type": "name" }, { "api_name": "custom.ewsghana.reminders.STOCKOUT_REPORT", "line_number": 300, "usage_type": "name" }, { "api_name": "custom.ewsghana.utils.set_sms_notifications", "line_number": 308, "usage_type": "call" }, { "api_name": "datetime.datetime.utcnow", "line_number": 311, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 311, "usage_type": "name" }, { "api_name": "custom.ewsghana.tasks.stockout_notification_to_web_supers", "line_number": 312, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.filter", "line_number": 313, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 313, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 313, "usage_type": "name" }, { "api_name": "custom.ewsghana.tasks.reminder_to_submit_rrirv", "line_number": 317, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.all", "line_number": 319, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 319, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 319, "usage_type": "name" }, { "api_name": "datetime.datetime.utcnow", "line_number": 323, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 323, "usage_type": "name" }, { "api_name": "datetime.timedelta", "line_number": 324, "usage_type": "call" }, { "api_name": "custom.ewsghana.tasks.reminder_to_visit_website", "line_number": 326, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.all", "line_number": 327, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 327, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 327, "usage_type": "name" }, { "api_name": "datetime.timedelta", "line_number": 329, "usage_type": "call" }, { "api_name": "custom.ewsghana.tasks.reminder_to_visit_website", "line_number": 332, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.filter", "line_number": 334, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects", "line_number": 334, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 334, "usage_type": "name" }, { "api_name": "custom.ewsghana.utils.set_sms_notifications", "line_number": 337, "usage_type": "call" }, { "api_name": "datetime.datetime.utcnow", "line_number": 340, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 340, "usage_type": "name" }, { "api_name": "custom.ewsghana.tasks.reminder_to_visit_website", "line_number": 341, "usage_type": "call" }, { "api_name": "corehq.apps.sms.models.SMS.objects.filter", "line_number": 343, "usage_type": "call" }, { "api_name": 
"corehq.apps.sms.models.SMS.objects", "line_number": 343, "usage_type": "attribute" }, { "api_name": "corehq.apps.sms.models.SMS", "line_number": 343, "usage_type": "name" } ]
32215700
# coding=utf-8

"""Search SoundCloud playlists for audio."""
from __future__ import absolute_import

import os
import string
import sys

import requests
import soundcloud
from tqdm import tqdm


def sanitize(s):
    valid = '-_.() {}{}'.format(string.ascii_letters, string.digits)
    return ''.join(c for c in s if c in valid)


if 'SOUNDCLOUD_API_KEY' in os.environ:
    API_KEY = os.environ['SOUNDCLOUD_API_KEY']
else:
    API_KEY = "81f430860ad96d8170e3bf1639d4e072"


def scrape(query, include, exclude, quiet, overwrite):
    """Search SoundCloud and download audio from discovered playlists."""

    # Launch SoundCloud client.
    client = soundcloud.Client(client_id=API_KEY)

    # Generator for yielding all results pages.
    def pagination(x):
        yield x
        while x.next_href:
            x = client.get(x.next_href)
            yield x

    # Search SoundCloud for playlists.
    for playlists in pagination(
            client.get(
                '/playlists',
                q=query,
                tags=','.join(include) if include else '',
                linked_partitioning=1,
                representation='compact')):

        # Download playlists.
        for playlist in playlists.collection:

            # Skip playlists containing filter terms.
            metadata = [playlist.title]
            if playlist.description:
                metadata.append(playlist.description)
            haystack = ' '.join(metadata).lower()
            if any(needle in haystack for needle in exclude):
                continue

            # Create directory for playlist.
            directory = sanitize(playlist.title)
            if directory == '':
                continue
            if not os.path.exists(directory):
                os.mkdir(directory)

            # Download tracks in playlist.
            for track in client.get(playlist.tracks_uri):
                file = os.path.join(directory, sanitize(track.title) + '.mp3')

                # Skip existing files.
                if os.path.exists(file) and not overwrite:
                    continue

                # Skip tracks that are not allowed to be streamed.
                if not track.streamable:
                    continue

                # Skip tracks named with filter terms (description may be None).
                haystack = (track.title + ' ' + (track.description or '') + ' ' +
                            track.tag_list).lower()
                if any(needle in haystack for needle in exclude):
                    continue

                # Download track.
                r = requests.get(
                    client.get(track.stream_url, allow_redirects=False).location,
                    stream=True)
                total_size = int(r.headers['content-length'])
                chunk_size = 1000000  # 1 MB chunks
                with open(file, 'wb') as f:
                    for data in tqdm(
                            r.iter_content(chunk_size),
                            desc=track.title,
                            total=total_size / chunk_size,
                            unit='MB',
                            file=sys.stdout):
                        f.write(data)
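
# Usage sketch (argument values are illustrative):
#
#   scrape(query="field recordings", include=["ambient"], exclude=["mix"],
#          quiet=True, overwrite=False)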
null
audioscrape/soundcloud.py
soundcloud.py
py
3,206
python
en
code
null
code-starcoder2
83
[ { "api_name": "string.ascii_letters", "line_number": 15, "usage_type": "attribute" }, { "api_name": "string.digits", "line_number": 15, "usage_type": "attribute" }, { "api_name": "os.environ", "line_number": 19, "usage_type": "attribute" }, { "api_name": "os.environ", "line_number": 20, "usage_type": "attribute" }, { "api_name": "soundcloud.Client", "line_number": 29, "usage_type": "call" }, { "api_name": "os.path.exists", "line_number": 62, "usage_type": "call" }, { "api_name": "os.path", "line_number": 62, "usage_type": "attribute" }, { "api_name": "os.mkdir", "line_number": 63, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 67, "usage_type": "call" }, { "api_name": "os.path", "line_number": 67, "usage_type": "attribute" }, { "api_name": "os.path.exists", "line_number": 70, "usage_type": "call" }, { "api_name": "os.path", "line_number": 70, "usage_type": "attribute" }, { "api_name": "requests.get", "line_number": 84, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 91, "usage_type": "call" }, { "api_name": "sys.stdout", "line_number": 96, "usage_type": "attribute" } ]
25071322
import os

import kmeans
from setuptools import setup
from distutils.extension import Extension

here = os.path.abspath(os.path.dirname(__file__))
ckmeans = Extension(
    'lib',
    sources=['lib.c'],
    extra_compile_args=['-O3', '-std=c99']
)


def read(*filenames, **kwargs):
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n\n')
    contents = []
    for filename in filenames:
        with open(filename, encoding=encoding) as f:
            contents.append(f.read())
    return sep.join(contents)


long_description = read('README.rst', 'LICENSE')

setup(
    name='kmeans',
    version=kmeans.version,
    url='http://github.com/numberoverzero/kmeans/',
    license='MIT',
    author='Joe Cross',
    install_requires=[],
    author_email='[email protected]',
    description='python wrapper for basic c implementation of kmeans',
    long_description=long_description,
    include_package_data=True,
    platforms='any',
    classifiers=[
        'Programming Language :: Python',
        'Development Status :: 3 - Alpha',
        'Natural Language :: English',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    extras_require={
        'testing': ['pytest'],
    },
    ext_modules=[ckmeans]
)
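
# Usage sketch (standard setuptools commands; illustrative):
#
#   pip install .            # compiles the 'lib' C extension declared above
#   pip install .[testing]   # also installs pytest via extras_require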
null
setup.py
setup.py
py
1,413
python
en
code
null
code-starcoder2
83
[ { "api_name": "os.path.abspath", "line_number": 6, "usage_type": "call" }, { "api_name": "os.path", "line_number": 6, "usage_type": "attribute" }, { "api_name": "os.path.dirname", "line_number": 6, "usage_type": "call" }, { "api_name": "distutils.extension.Extension", "line_number": 7, "usage_type": "call" }, { "api_name": "setuptools.setup", "line_number": 26, "usage_type": "call" }, { "api_name": "kmeans.version", "line_number": 28, "usage_type": "attribute" } ]
574155818
from __future__ import print_function

import sys
from itertools import combinations

__author__ = 'Joshua'


def opendata(data):
    transfordata = []
    input_data = data.readlines()
    for data_line in input_data:
        tmp = data_line.strip().split(',')
        transfordata.append(tmp)
    return transfordata  # [['d', 'e', 'a'], ['d', 'b', 'e', 'f']]


def count_items(data, support):
    frequent_items = {}
    for each_line in data:
        for each_items in each_line:
            frequent_items[each_items] = frequent_items.get(each_items, 0) + 1
    # iterate over a copy of the keys so entries can be deleted safely
    for each in list(frequent_items.keys()):
        if frequent_items[each] < support:
            del frequent_items[each]
    print(sorted(list(frequent_items)), '\n')
    return sorted(list(frequent_items))  # ['a', 'b', 'c', 'd', 'e', 'f', 'g']


def find_pairs(data, num):
    return list(combinations(data, num))  # [('a', 'b'), ('a', 'c')]


def count_more_pairs(data, raw_data, num):
    candidate_raw_morepairs = list(combinations(data, num))
    candidate_items_3 = []
    for i in range(len(candidate_raw_morepairs)):
        count = 0
        for j in range(len(raw_data)):
            if set(candidate_raw_morepairs[i]).issuperset(set(raw_data[j])):
                count += 1
        if count > (num - 1):
            candidate_items_3.append(sorted(candidate_raw_morepairs[i]))
    return candidate_items_3


def hash_function(data, raw_data, buckets, support):
    temp_list = []
    for each in data:
        num = 0
        for items in range(len(raw_data)):
            if set(each).issubset(set(raw_data[items])):
                num += 1
        temp_list.append([each, num])  # ('a', 'b'), 8
    pairsoutput = [temp_list[i][0] for i in range(len(temp_list)) if temp_list[i][1] >= support]
    output = [[temp_list.index(temp_list[each]) % buckets, temp_list[each][1]] for each in range(len(temp_list))]
    pairs_tmv = [[temp_list.index(temp_list[each]) % buckets, temp_list[each][0]]
                 for each in range(len(temp_list))]  # [0, ('a', 'b')
    for j in range(len(output)):
        for k in range(j + 1, len(output) - 1):
            if output[j][0] == output[k][0]:
                output[k][1] = output[j][1] + output[k][1]
                del output[j]
    output1 = []
    for each in range(len(output)):
        if output[each][1] >= support:
            bitmap = 1
            output1.append((output[each][0], bitmap))  # bitmap output
    pairs_output = []
    for each in dict(output1):
        for i in range(len(pairs_tmv)):
            if pairs_tmv[i][0] == each:
                pairs_output.append(list(pairs_tmv[i][1]))
    if len(pairsoutput) == 0:
        return pairs_output, dict(output)
    else:
        print(dict(output), '\n', sorted(pairsoutput), '\n')
        return pairs_output, dict(output)


if __name__ == '__main__':
    # inputdata = open(sys.argv[1])
    inputdata = open('input.txt')
    # support = int(sys.argv[2])
    support = 4
    # buckets = int(sys.argv[3])
    buckets = 20
    raw_data = opendata(inputdata)
    sorted_items = count_items(raw_data, support)
    candidate_raw_pairs = find_pairs(sorted_items, 2)
    for num in range(3, len(sorted_items)):
        candidate_pairs, bitmap = hash_function(candidate_raw_pairs, raw_data, buckets, support)
        if len(candidate_pairs) == 0:
            break
        else:
            candidate_raw_pairs = count_more_pairs(sorted_items, candidate_raw_pairs, num)
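
# Input sketch: input.txt is expected to hold one comma-separated basket per
# line (illustrative contents, not the actual dataset):
#
#   a,b,c,d
#   b,c,e
#   a,c,d,e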
null
Code2_Hash/bozhao_li_pcy.py
bozhao_li_pcy.py
py
3,059
python
en
code
null
code-starcoder2
83
[ { "api_name": "itertools.combinations", "line_number": 30, "usage_type": "call" }, { "api_name": "itertools.combinations", "line_number": 34, "usage_type": "call" } ]
425579551
# MODEL m15_4M_01 - NN CLASS

import torch
import torch.nn as nn
import numpy as np
import pandas as pd


# artificial neural network
class ANN(nn.Module):

    def __init__(self, input_size_swl, input_size, hidden_size, output_size, num_lstm_layers, bias=True):
        super(ANN, self).__init__()
        self.num_lstm_layers = num_lstm_layers
        self.input_size_swl = input_size_swl
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        # lstm layer with the swl in the input --> input_size = 15
        self.lstm_swl = nn.LSTM(input_size_swl, hidden_size, num_lstm_layers, batch_first=True)

        # lstm layer without the swl in the input --> input_size = 14
        self.lstm = nn.LSTM(input_size, hidden_size, num_lstm_layers, batch_first=True)

        # readout layer
        self.fc1 = nn.Linear(hidden_size * output_size, output_size)
        nn.init.normal_(self.fc1.bias, mean=0.0, std=1.0)
        self.Sigmoid = nn.Sigmoid()

    def forward(self, input_swl, input):
        # set initial hidden and cell state
        h_0 = torch.zeros(self.num_lstm_layers, input.size(0), self.hidden_size)
        c_0 = torch.zeros(self.num_lstm_layers, input.size(0), self.hidden_size)

        # forward propagate: the second LSTM is initialised with the final
        # hidden/cell state of the swl LSTM
        out_swl, (hn_swl, cn_swl) = self.lstm_swl(input_swl, (h_0, c_0))
        out_lstm, (hn, cn) = self.lstm(input, (hn_swl, cn_swl))
        out = torch.cat((out_swl, out_lstm), 1)
        out = out.reshape(input.size(0), 1, self.hidden_size * self.output_size)
        out = self.fc1(out)
        out = self.Sigmoid(out)
        return out
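
# Usage sketch (sizes are illustrative assumptions; the two sequence lengths
# must sum to output_size for the reshape in forward() to work):
#
#   model = ANN(input_size_swl=15, input_size=14, hidden_size=32,
#               output_size=2, num_lstm_layers=1)
#   x_swl = torch.randn(8, 1, 15)   # (batch, seq_len, features incl. swl)
#   x     = torch.randn(8, 1, 14)   # (batch, seq_len, features excl. swl)
#   y = model(x_swl, x)             # -> (8, 1, 2)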
null
m15_4M_01/m15_4M_01_03_nn_class.py
m15_4M_01_03_nn_class.py
py
1,537
python
en
code
null
code-starcoder2
83
[ { "api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 9, "usage_type": "name" }, { "api_name": "torch.nn.LSTM", "line_number": 19, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 19, "usage_type": "name" }, { "api_name": "torch.nn.LSTM", "line_number": 22, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 22, "usage_type": "name" }, { "api_name": "torch.nn.Linear", "line_number": 25, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 25, "usage_type": "name" }, { "api_name": "torch.nn.init.normal_", "line_number": 26, "usage_type": "call" }, { "api_name": "torch.nn.init", "line_number": 26, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 26, "usage_type": "name" }, { "api_name": "torch.nn.Sigmoid", "line_number": 27, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 27, "usage_type": "name" }, { "api_name": "torch.zeros", "line_number": 31, "usage_type": "call" }, { "api_name": "torch.zeros", "line_number": 32, "usage_type": "call" }, { "api_name": "torch.cat", "line_number": 37, "usage_type": "call" } ]
646526005
import argparse
import csv
from pathlib import Path


def read_csv(src_file, output_file):
    with open(src_file) as file_object:
        reader = csv.reader(file_object)
        header_row = next(reader)

        if output_file:
            # write each parsed row to the destination file, one row per line
            with open(output_file, "w") as f:
                for row in reader:
                    f.write(str(row) + "\n")
        else:
            for row in reader:
                print(row)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Description of your app.')
    parser.add_argument('inDir', type=Path,
                        help='Path to the file to be read from.')
    parser.add_argument('-d', '--destination', type=Path,
                        help='The name of the file to store the read file in')
    args = parser.parse_args()
    result = read_csv(args.inDir, args.destination)
    print("You have written from: " + str(args.inDir) + " to file: " + str(args.destination))
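
# Usage sketch (file names are illustrative):
#
#   python ex2.py data.csv                 # print rows to stdout
#   python ex2.py data.csv -d rows.txt     # write rows to rows.txt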
null
modules/ex2.py
ex2.py
py
944
python
en
code
null
code-starcoder2
83
[ { "api_name": "csv.reader", "line_number": 8, "usage_type": "call" }, { "api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call" }, { "api_name": "pathlib.Path", "line_number": 21, "usage_type": "name" }, { "api_name": "pathlib.Path", "line_number": 23, "usage_type": "name" } ]
415903220
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""desc"""

import torch.nn as nn
import torch.nn.functional as F

from .attention import MultiHeadedAttention
from .utils import PositionwiseFeedForward


class TransformerEncoderLayer(nn.Module):
    """
    Bidirectional Encoder = Transformer (self-attention)
    Transformer = MultiHead_Attention + Feed_Forward with sublayer connection

    Args:
        d_model:
        head_count: number of heads in multi-head attention
        dim_feed_forward: dim_feed_forward, usually 4*d_model_size
        dropout: dropout rate
    """

    def __init__(self, d_model, head_count, dim_feed_forward, dropout):
        super().__init__()
        self.d_model = d_model
        self.head_count = head_count
        self.dim_feed_forward = dim_feed_forward
        self.dropout = dropout

        self.self_attention = MultiHeadedAttention(head_count=head_count, d_model=d_model, dropout=dropout)
        self.feed_forward = PositionwiseFeedForward(d_model=d_model, d_ff=dim_feed_forward, dropout=dropout)
        self.layer_norm_attn = nn.LayerNorm(normalized_shape=d_model, eps=1e-6)
        self.layer_norm_ff = nn.LayerNorm(normalized_shape=d_model, eps=1e-6)

    def forward(self, x, mask):
        """
        Add/Norm is introduced here.

        args:
            x: (B, Tx, d_model)
            mask: (B, 1, Tx)?
        """
        x_norm = self.layer_norm_attn(x)
        context, _ = self.self_attention(x_norm, x_norm, x_norm, mask)
        context = F.dropout(context, p=self.dropout, training=self.training)
        x = x + context  # Residual

        x_norm = self.layer_norm_ff(x)
        output = self.feed_forward(x_norm)
        # dropout on the feed-forward output, then the residual connection
        output = F.dropout(output, p=self.dropout, training=self.training)
        output = output + x
        return output
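
# Usage sketch (dimensions are illustrative assumptions; torch is imported
# here only for the example tensors):
#
#   import torch
#   layer = TransformerEncoderLayer(d_model=256, head_count=8,
#                                   dim_feed_forward=1024, dropout=0.1)
#   x = torch.randn(4, 50, 256)         # (B, Tx, d_model)
#   mask = torch.ones(4, 1, 50).bool()  # (B, 1, Tx)
#   y = layer(x, mask)                  # -> (4, 50, 256)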
null
bert_pytorch/model/transformer.py
transformer.py
py
1,805
python
en
code
null
code-starcoder2
83
[ { "api_name": "torch.nn.Module", "line_number": 12, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 12, "usage_type": "name" }, { "api_name": "attention.MultiHeadedAttention", "line_number": 32, "usage_type": "call" }, { "api_name": "utils.PositionwiseFeedForward", "line_number": 33, "usage_type": "call" }, { "api_name": "torch.nn.LayerNorm", "line_number": 34, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 34, "usage_type": "name" }, { "api_name": "torch.nn.LayerNorm", "line_number": 35, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 35, "usage_type": "name" }, { "api_name": "torch.nn.functional.dropout", "line_number": 48, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 48, "usage_type": "name" }, { "api_name": "torch.nn.functional.dropout", "line_number": 53, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 53, "usage_type": "name" } ]
424472261
from configparser import ConfigParser


def config(filename, section):
    parser = ConfigParser()
    parser.read(filename)

    configs = {}
    if parser.has_section(section):
        params = parser.items(section)
        for param in params:
            configs[param[0]] = param[1]
    else:
        raise Exception('Section {} not found in {} file.'.format(section, filename))

    return configs
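
# Usage sketch (file name, section and keys are illustrative):
#
#   # database.ini
#   # [postgresql]
#   # host = localhost
#   # dbname = mydb
#
#   params = config('database.ini', 'postgresql')
#   # -> {'host': 'localhost', 'dbname': 'mydb'}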
null
config.py
config.py
py
407
python
en
code
null
code-starcoder2
83
[ { "api_name": "configparser.ConfigParser", "line_number": 4, "usage_type": "call" } ]
629166725
##############################################################################80 # # # Raw-data Reader # # # # (2020) Nicolo Fabbiane # # # ################################################################################ class RawData: """ """ def __init__(self, path='data', git_url=None, git_env={}, data_fmt='jhu'): """ """ # store data self.path = path self.git_url = git_url self.git_env = git_env self.data_fmt = data_fmt self.time = {} self.confirmed = {} self.recovered = {} self.deaths = {} self.active = {} self.intensive = {} # initialize self.update_git() # read data self.read_data() #___________________________________________________________________________ # def update_git(self): """ """ # modules import os from git import Repo # does the repo exists? if os.path.isdir(self.path): # get repo origin origin = Repo(self.path).remote() # update (pull) origin.pull(env=self.git_env) elif not(self.git_url is None): # clone repo from url repo = Repo.clone_from(self.git_url, self.path, env=self.git_env) #___________________________________________________________________________ # def read_data(self): """ """ # select reader if self.data_fmt is 'jhu': # John Hopkins University data = read_data_jhu(self) elif self.data_fmt is 'dpc': # Dipartimento della protezione Civile data = read_data_dpc(self) elif self.data_fmt is 'ofr': # OpenCoVid19-fr data = read_data_ofr(self) else: # default (John Hopkins University) data = read_data_jhu(self) # unpack data self.time = data[0] self.confirmed = data[1] self.recovered = data[2] self.deaths = data[3] self.active = data[4] self.intensive = data[5] #___________________________________________________________________________ # def get_time(self): """ """ return self.time #___________________________________________________________________________ # def get_data_for_regions(self, regions): """ """ # check inputs if not(isinstance(regions, list)): regions = list(self.confirmed.keys()) regions+= list(self.recovered.keys()) regions+= list(self.deaths.keys()) regions = list(set(regions)) # initialize output confirmed = [0]*len(self.time) recovered = [0]*len(self.time) deaths = [0]*len(self.time) active = [0]*len(self.time) intensive = [0]*len(self.time) # loop on regions for region in regions: if region in self.confirmed.keys(): for i, data in enumerate(self.confirmed[region]): confirmed[i]+= data else: print('WARNING! %s not found in confirmed' %(region)) if region in self.recovered.keys(): for i, data in enumerate(self.recovered[region]): recovered[i]+= data else: print('WARNING! %s not found in recovered' %(region)) if region in self.deaths.keys(): for i, data in enumerate(self.deaths[region]): deaths[i]+= data else: print('WARNING! %s not found in deaths' %(region)) if region in self.active.keys(): for i, data in enumerate(self.active[region]): active[i]+= data else: print('WARNING! %s not found in active' %(region)) if region in self.intensive.keys(): for i, data in enumerate(self.intensive[region]): intensive[i]+= data else: print('WARNING! 
%s not found in intensive' %(region))
    # output
    return confirmed, recovered, deaths, active, intensive
#___________________________________________________________________________
#
################################################################################
# Support functions
#_______________________________________________________________________________
#
def read_data_jhu(rawdata):
    """Read the JHU CSSE time series and derive active cases per region.
    """
    # modules
    import os
    # path to time-series files
    timeseries_path = os.path.join(rawdata.path, 'csse_covid_19_data',
                                   'csse_covid_19_time_series')
    # read confirmed cases
    timeseries_file = os.path.join(timeseries_path, 'time_series_19-covid-Confirmed.csv')
    time, confirmed = read_file_jhu(timeseries_file)
    # read recovered cases
    timeseries_file = os.path.join(timeseries_path, 'time_series_19-covid-Recovered.csv')
    time, recovered = read_file_jhu(timeseries_file)
    # read deaths
    timeseries_file = os.path.join(timeseries_path, 'time_series_19-covid-Deaths.csv')
    time, deaths = read_file_jhu(timeseries_file)
    # compute active cases (and dummy intensive care)
    active = {}; intensive = {}
    for region in confirmed.keys():
        active[region] = [0]*len(confirmed[region])
        intensive[region] = [0]*len(confirmed[region])
        for i in range(len(confirmed[region])):
            active[region][i] = confirmed[region][i]
            active[region][i]-= recovered[region][i] + deaths[region][i]
    # output
    return time, confirmed, recovered, deaths, active, intensive
#_______________________________________________________________________________
#
def read_file_jhu(filename):
    """Parse one JHU time-series CSV into (timestamps, totals by region).
    """
    # modules
    import datetime as dt
    # open file
    f = open(filename, 'r')
    # read header (time-stamps)
    line = f.readline().rstrip('\r\n').split(',')
    time = [dt.datetime.strptime(t, '%m/%d/%y') for t in line[4:]]
    # loop on entries
    data_by_region={}
    for l in f:
        # split line
        line = l.rstrip('\r\n').rsplit(',', len(time)+3)
        # get region
        region = line[1]
        # initialize region if needed
        if not(region in data_by_region.keys()):
            data_by_region[region] = [0]*len(time)
        # add data to region
        for i, data in enumerate(line[4:]):
            try:
                data_by_region[region][i]+= int(data)
            except ValueError:
                # non-numeric cells count as zero
                data_by_region[region][i]+= 0
    # output
    return time, data_by_region
#_______________________________________________________________________________
#
def read_data_dpc(rawdata):
    """Read the Italian Protezione Civile regional time series.
    """
    # modules
    import os
    import datetime as dt
    # path to time-series files
    timeseries_path = os.path.join(rawdata.path, 'dati-regioni')
    # read confirmed cases
    timeseries_file = os.path.join(timeseries_path, 'dpc-covid19-ita-regioni.csv')
    # initialize output
    time = []
    confirmed = {}
    recovered = {}
    deaths = {}
    active = {}
    intensive = {}
    # open file
    f = open(timeseries_file, 'r')
    # skip header
    f.readline()
    # loop on entries
    for l in f:
        # split line
        line = l.rstrip('\r\n').rsplit(',')
        # get time
        t = dt.datetime.strptime(line[0], '%Y-%m-%d %H:%M:%S')
        t = dt.datetime(*t.timetuple()[:3])
        if not(t in time):
            time.append(t)
        # get region name
        region = line[3]
        # get confirmed
        if not(region in confirmed.keys()):
            confirmed[region]=[0]*(len(time)-1)
        confirmed[region]+= [int(line[14])]
        # get recovered
        if not(region in recovered.keys()):
            recovered[region]=[0]*(len(time)-1)
        recovered[region]+= [int(line[12])]
        # get deaths
        if not(region in deaths.keys()):
            deaths[region]=[0]*(len(time)-1)
        deaths[region]+= [int(line[13])]
        # get active
        if not(region in active.keys()):
            active[region]=[0]*(len(time)-1)
        active[region]+= [int(line[10])]
        # get intensive
        if not(region in intensive.keys()):
            intensive[region]=[0]*(len(time)-1)
        intensive[region]+= [int(line[7])]
    # output
    return time, confirmed, recovered, deaths, active, intensive
#_______________________________________________________________________________
#
def read_data_ofr(rawdata):
    """Read the opencovid19-fr time series (regional and national rows).
    """
    # modules
    import os
    import datetime as dt
    # path to time-series files
    timeseries_path = os.path.join(rawdata.path, 'dist')
    # read confirmed cases
    timeseries_file = os.path.join(timeseries_path, 'chiffres-cles.csv')
    # initialize output
    time = []
    confirmed = {}
    recovered = {}
    deaths = {}
    active = {}
    intensive = {}
    # open file
    f = open(timeseries_file, 'r')
    # skip header
    f.readline()
    # loop on entries
    for l in f:
        # split line
        line = l.rstrip('\r\n').rsplit(',')
        line = ['0' if c == '' else c for c in line]
        if ('REG' in line[2]) or ('FRA' in line[2]):
            if ('et de la Sant' in line[9]) or ('ARS' in line[9]):
                # get time
                t = dt.datetime.strptime(line[0], '%Y-%m-%d')
                t = dt.datetime(*t.timetuple()[:3])
                if not(t in time):
                    time.append(t)
                # get region name
                region = line[3]
                # get confirmed
                if not(region in confirmed.keys()):
                    confirmed[region]=[]
                confirmed[region]+= [0]*(len(time)-len(confirmed[region])-1)
                confirmed[region]+= [int(line[4])]
                # get deaths
                if not(region in deaths.keys()):
                    deaths[region]=[]
                deaths[region]+= [0]*(len(time)-len(deaths[region])-1)
                deaths[region]+= [int(line[5])]
                # get intensive
                if not(region in intensive.keys()):
                    intensive[region]=[]
                intensive[region]+= [0]*(len(time)-len(intensive[region])-1)
                intensive[region]+= [int(line[6])]
                # get recovered
                if not(region in recovered.keys()):
                    recovered[region]=[]
                recovered[region]+= [0]*(len(time)-len(recovered[region])-1)
                recovered[region]+= [int(line[8])]
                # initialize active
                if not(region in active.keys()):
                    active[region]=[]
    # fix_data
    fix_data_ofr(time, confirmed)
    fix_data_ofr(time, deaths)
    fix_data_ofr(time, recovered)
    # compute active
    # (dict.items(), not the Python 2 iteritems(), so this runs on Python 3)
    for region, data in active.items():
        active[region] = [c-r-d for c, r, d in zip(confirmed[region],
                                                   deaths[region],
                                                   recovered[region])]
    # output
    return time, confirmed, recovered, deaths, active, intensive
#_______________________________________________________________________________
#
def fix_data_ofr(time, data_dict):
    """Force each region's cumulative series to be non-decreasing (in place).
    """
    for region, data in data_dict.items():
        for i, point in enumerate(data):
            if i == 0:
                mindata = point
            elif point < mindata:
                data_dict[region][i]=mindata
            else:
                mindata = point
    return data_dict
#_______________________________________________________________________________
#
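# Hypothetical smoke test for read_file_jhu; the CSV rows below are
# illustrative, not real JHU figures.
if __name__ == '__main__':
    import os
    import tempfile
    csv_text = ("Province/State,Country/Region,Lat,Long,1/22/20,1/23/20\n"
                "Hubei,China,30.97,112.27,444,445\n"
                ",Italy,41.87,12.56,0,0\n")
    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as tmp:
        tmp.write(csv_text)
    stamps, confirmed = read_file_jhu(tmp.name)
    print(stamps[0])           # 2020-01-22 00:00:00
    print(confirmed['China'])  # [444, 445]
    os.remove(tmp.name)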
null
libpy/rawdata.py
rawdata.py
py
9,915
python
en
code
null
code-starcoder2
83
[ { "api_name": "os.path.isdir", "line_number": 39, "usage_type": "call" }, { "api_name": "os.path", "line_number": 39, "usage_type": "attribute" }, { "api_name": "git.Repo", "line_number": 41, "usage_type": "call" }, { "api_name": "git.Repo.clone_from", "line_number": 46, "usage_type": "call" }, { "api_name": "git.Repo", "line_number": 46, "usage_type": "name" }, { "api_name": "os.path.join", "line_number": 131, "usage_type": "call" }, { "api_name": "os.path", "line_number": 131, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 133, "usage_type": "call" }, { "api_name": "os.path", "line_number": 133, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 136, "usage_type": "call" }, { "api_name": "os.path", "line_number": 136, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 139, "usage_type": "call" }, { "api_name": "os.path", "line_number": 139, "usage_type": "attribute" }, { "api_name": "datetime.datetime.strptime", "line_number": 162, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 162, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 188, "usage_type": "call" }, { "api_name": "os.path", "line_number": 188, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 190, "usage_type": "call" }, { "api_name": "os.path", "line_number": 190, "usage_type": "attribute" }, { "api_name": "datetime.datetime.strptime", "line_number": 207, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 207, "usage_type": "attribute" }, { "api_name": "datetime.datetime", "line_number": 208, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 238, "usage_type": "call" }, { "api_name": "os.path", "line_number": 238, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 240, "usage_type": "call" }, { "api_name": "os.path", "line_number": 240, "usage_type": "attribute" }, { "api_name": "datetime.datetime.strptime", "line_number": 260, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 260, "usage_type": "attribute" }, { "api_name": "datetime.datetime", "line_number": 261, "usage_type": "call" } ]
366246319
from BugTracker.models import BugScreenShots, Bug
from django.http import HttpResponse
import logging
import json
import shutil
from TeamTrack.settings import STATIC_URL

log = logging.getLogger(__name__)


def UpdateScreenshots(sender, instance, **kwargs):
    try:
        BugScreenShotsObj = BugScreenShots.objects.filter(imageId=instance.id)
        # json.load (not json.loads) parses straight from the open file object,
        # and the result is a dict, so the folder is read by key.
        with open("%s%s" % (STATIC_URL, "Config/BugTrackerConfig.json"), 'r') as config_file:
            config = json.load(config_file)
        TempDestination = "%s" % config["BugScreenshotTempFolder"]
        FinalDestination = instance.imagePath
        for screenshot in BugScreenShotsObj:
            newtempDestination = "%s%s" % (TempDestination, screenshot.imageName)
            shutil.move(FinalDestination, newtempDestination)
        return HttpResponse("Success")
    except Exception as ex:
        log.exception(ex)
        return HttpResponse(ex)
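# Hypothetical wiring: the source does not show how the handler is connected,
# but the (sender, instance, **kwargs) signature suggests a post_save receiver:
#
#     from django.db.models.signals import post_save
#     post_save.connect(UpdateScreenshots, sender=Bug)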
null
BugTracker/Signals.py
Signals.py
py
922
python
en
code
null
code-starcoder2
83
[ { "api_name": "logging.getLogger", "line_number": 7, "usage_type": "call" }, { "api_name": "BugTracker.models.BugScreenShots.objects.filter", "line_number": 11, "usage_type": "call" }, { "api_name": "BugTracker.models.BugScreenShots.objects", "line_number": 11, "usage_type": "attribute" }, { "api_name": "BugTracker.models.BugScreenShots", "line_number": 11, "usage_type": "name" }, { "api_name": "TeamTrack.settings.STATIC_URL", "line_number": 12, "usage_type": "name" }, { "api_name": "json.loads", "line_number": 13, "usage_type": "call" }, { "api_name": "shutil.move", "line_number": 18, "usage_type": "call" }, { "api_name": "django.http.HttpResponse", "line_number": 20, "usage_type": "call" }, { "api_name": "django.http.HttpResponse", "line_number": 23, "usage_type": "call" } ]
32506196
#!/usr/local/bin/python3 # coding: UTF-8 # Author: David # Email: [email protected] # Created: 2017-05-09 18:43 # Last modified: 2017-05-09 18:51 # Filename: subprocess_streams.py # Description: import asyncio import asyncio.subprocess import sys async def get_date(): code = 'import datetime; print(datetime.datetime.now())' create = asyncio.create_subprocess_exec(sys.executable, '-c', code, stdout=asyncio.subprocess.PIPE) proc = await create data = await proc.stdout.readline() line = data.decode('ascii').rstrip() await proc.wait() return line if sys.platform == 'win32': loop = asyncio.ProactorEventLoop() asyncio.set_event_loop(loop) else: loop = asyncio.get_event_loop() date = loop.run_until_complete(get_date()) print('Current date: {}'.format(date)) loop.close()
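# Alternative for Python 3.7+: asyncio.run() creates and closes the event loop
# itself, so the platform-specific setup above becomes unnecessary (from
# Python 3.8, Windows defaults to a subprocess-capable loop). A sketch:
#
#     print('Current date: {}'.format(asyncio.run(get_date())))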
null
Python/asyncio/subprocess_streams.py
subprocess_streams.py
py
874
python
en
code
null
code-starcoder2
83
[ { "api_name": "asyncio.create_subprocess_exec", "line_number": 17, "usage_type": "call" }, { "api_name": "sys.executable", "line_number": 17, "usage_type": "attribute" }, { "api_name": "asyncio.subprocess", "line_number": 18, "usage_type": "attribute" }, { "api_name": "sys.platform", "line_number": 29, "usage_type": "attribute" }, { "api_name": "asyncio.ProactorEventLoop", "line_number": 30, "usage_type": "call" }, { "api_name": "asyncio.set_event_loop", "line_number": 31, "usage_type": "call" }, { "api_name": "asyncio.get_event_loop", "line_number": 33, "usage_type": "call" } ]
182488865
from websocket import create_connection
import threading

active = True
server_message = ''

#variables from server:
#encounter_state (WAITING, IN_PROGRESS, ENDED_FAIL, ENDED_WIN, UDEAD)
#boss_balance (integer - balance remaining)
#num_musos (integer - how many musos are connected)
#muso_list (list - from json - of muso dicts)

#Listener thread receives responses from the server
#It writes to global variables for access via other functions
def listener(server):
    global server_message
    while active:
        server_message = server.recv()
        print(server_message, end='\n> ')

def send(message):
    global server
    server.send(message)

def disconnect():
    global server
    global active
    active = False
    server.close()

#Usernames are sent using ||: as a delimiter
#For robustness in future, should be base64 encoded or similar
def connect(name, ip, port=8765):
    global server
    server = create_connection(f'ws://{ip}:{port}')
    server.send(f'||:{name}')
    conn_id = server.recv()
    print(conn_id)
    listener_thread = threading.Thread(target=listener, args=(server,))
    listener_thread.start()
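# Hypothetical client session (host, port, and messages are made up; this
# assumes a Chestral server is actually listening at the address):
if __name__ == '__main__':
    connect('muso_one', '127.0.0.1')  # registers the name via the '||:' prefix
    send('hello encounter')           # replies are printed by the listener thread
    disconnect()                      # stops the listener loop and closes the socket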
null
Client/lib_chestral.py
lib_chestral.py
py
1,132
python
en
code
null
code-starcoder2
83
[ { "api_name": "websocket.create_connection", "line_number": 37, "usage_type": "call" }, { "api_name": "threading.Thread", "line_number": 42, "usage_type": "call" } ]
529343989
#!/usr/bin/env python # -*- coding: utf-8 -*- # # combinator.py # # Copyright 2017 Bruno S <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. # """combinator module from ProperImage, for coadding astronomical images. Written by Bruno SANCHEZ PhD of Astromoy - UNC [email protected] Instituto de Astronomia Teorica y Experimental (IATE) UNC Cordoba - Argentina Of 301 """ import numpy as np from multiprocessing import Process try: import cPickle as pickle # noqa except ImportError: import pickle try: import pyfftw # noqa _fftwn = pyfftw.interfaces.numpy_fft.fftn # noqa _ifftwn = pyfftw.interfaces.numpy_fft.ifftn # noqa except ImportError: _fftwn = np.fft.fft2 _ifftwn = np.fft.ifft2 class StackCombinator(Process): """Combination engine. An engine for image combination in parallel, using multiprocessing.Process class. Uses an ensemble of images and a queue to calculate the propercoadd of the list of images. Parameters ---------- img_list: list or tuple list of SingleImage instances used in the combination process queue: multiprocessing.Queue instance an instance of multiprocessing.Queue class where to pickle the intermediate results. shape: shape of the images being coadded. stack: boolean, default True Whether to stack the results for coadd or just obtain individual image calculations. If True it will pickle in queue a coadded image of the chunk's images. If False it will pickle in queue a list of individual matched filtered images. fourier: boolean, default False. Whether to calculate individual fourier transform of each s_component image. If stack is True this parameter will be ignored. If stack is False, and fourier is True, the pickled object will be a tuple of two values, with the first one containing the list of s_components, and the second one containing the list of fourier transformed s_components. Returns ------- Combinator process An instance of Combinator. 
This can be launched like a multiprocessing.Process Example ------- queue1 = multiprocessing.Queue() queue2 = multiprocessing.Queue() p1 = Combinator(list1, queue1) p2 = Combinator(list2, queue2) p1.start() p2.start() #results are in queues result1 = queue1.get() result2 = queue2.get() p1.join() p2.join() """ def __init__(self, img_list, queue, shape, stack=True, fourier=False, *args, **kwargs): super(StackCombinator, self).__init__(*args, **kwargs) self.list_to_combine = img_list self.queue = queue self.global_shape = shape print(self.global_shape) # self.zps = ensemble.transparencies def run(self): S_hat = np.zeros(self.global_shape).astype(np.complex128) psf_hat_sum = np.zeros(self.global_shape).astype(np.complex128) mix_mask = self.list_to_combine[0].pixeldata.mask for an_img in self.list_to_combine: np.add(an_img.s_hat_comp, S_hat, out=S_hat, casting='same_kind') np.add(((an_img.zp/an_img.var)**2)*an_img.psf_hat_sqnorm(), psf_hat_sum, out=psf_hat_sum) # , casting='same_kind') # psf_hat_sum = ((an_img.zp/an_img.var)**2)*an_img.psf_hat_sqnorm() mix_mask = np.ma.mask_or(mix_mask, an_img.pixeldata.mask) serialized = pickle.dumps([S_hat, psf_hat_sum, mix_mask]) self.queue.put(serialized) return
null
properimage/combinator.py
combinator.py
py
4,312
python
en
code
null
code-starcoder2
83
[ { "api_name": "pyfftw.interfaces", "line_number": 48, "usage_type": "attribute" }, { "api_name": "pyfftw.interfaces", "line_number": 49, "usage_type": "attribute" }, { "api_name": "numpy.fft", "line_number": 51, "usage_type": "attribute" }, { "api_name": "numpy.fft", "line_number": 52, "usage_type": "attribute" }, { "api_name": "multiprocessing.Process", "line_number": 55, "usage_type": "name" }, { "api_name": "numpy.zeros", "line_number": 123, "usage_type": "call" }, { "api_name": "numpy.complex128", "line_number": 123, "usage_type": "attribute" }, { "api_name": "numpy.zeros", "line_number": 124, "usage_type": "call" }, { "api_name": "numpy.complex128", "line_number": 124, "usage_type": "attribute" }, { "api_name": "numpy.add", "line_number": 128, "usage_type": "call" }, { "api_name": "numpy.add", "line_number": 129, "usage_type": "call" }, { "api_name": "numpy.ma.mask_or", "line_number": 132, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 132, "usage_type": "attribute" }, { "api_name": "pickle.dumps", "line_number": 134, "usage_type": "call" } ]
227421340
import datetime
from dateutil import parser

months = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July',
          8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}
endings = {1: 'st', 2: 'nd', 3: 'rd'}

def ordinalEnding(day):
    # 11th, 12th, and 13th are special cases; otherwise the last digit decides,
    # so 21 -> 'st' and 22 -> 'nd' instead of the incorrect 'th'.
    if 11 <= day % 100 <= 13:
        return 'th'
    return endings.get(day % 10, 'th')

def toDate(date):
    date = parser.parse(date)
    ending = ordinalEnding(date.day)
    outputText = months[date.month] + " " + str(date.day) + ending + ", " + str(date.year) + " " + amOrPm(date.hour, date.minute)
    return outputText

def amOrPm(hour, minute):
    # Zero-pad so strptime always sees a well-formed HH:MM string.
    hourMinuteString = "{:02d}:{:02d}".format(hour, minute)
    d = datetime.datetime.strptime(hourMinuteString, "%H:%M")
    return d.strftime("%I:%M %p")
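# Usage sketch (outputs assume the ordinal-suffix handling above):
if __name__ == '__main__':
    print(toDate('2021-03-02 14:05'))     # March 2nd, 2021 02:05 PM
    print(toDate('July 21 2021 9:07am'))  # July 21st, 2021 09:07 AM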
null
toDate.py
toDate.py
py
803
python
en
code
null
code-starcoder2
83
[ { "api_name": "dateutil.parser.parse", "line_number": 8, "usage_type": "call" }, { "api_name": "dateutil.parser", "line_number": 8, "usage_type": "name" }, { "api_name": "datetime.datetime.strptime", "line_number": 22, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 22, "usage_type": "attribute" } ]
309026448
#Import
import cv2
import pdb
import numpy as np
import matplotlib.pyplot as plt

#Function:
def isColor(pixel, color):
    if pixel[0] != color[0]:
        return False
    if pixel[1] != color[1]:
        return False
    if pixel[2] != color[2]:
        return False
    return True

def imgEdges(img, write = 0):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 100, apertureSize = 3)
    return edges

def detectLines(img):
    height, width = img.shape

    # Find all the short line segments
    # (minLineLength/maxLineGap must be passed by keyword: the fifth positional
    # argument of HoughLinesP is the optional output array, not minLineLength)
    lines = cv2.HoughLinesP(img, 1, np.pi/180, 250,
                            minLineLength = minLineLength, maxLineGap = maxLineGap)

    # Extend each segment to the left and right borders of the image
    lines2 = []
    for detected in lines:
        for x1, y1, x2, y2 in detected:
            if x2 == x1:
                continue # vertical segment: slope undefined, skip it
            a = (y2 - y1)/(x2 - x1)
            b = (y1*(x2 - x1) - x1*(y2 - y1))/(x2 - x1)
            yZero = int(b)
            yWidth = int(a*width + b)
            lines2.append((0, yZero, width, yWidth, a, b))

    # Build the horizontal projection
    sss = set()
    for x1, y1, x2, y2, a, b in lines2:
        for x in range(x1, x2):
            y = int(a*x + b)
            sss.add( (x, y) )
    projections = [0] * height
    for x, y in sss:
        if 0 <= y < height: # extended lines can leave the image; ignore those points
            projections[y] += 1
    projections = [v/float(width) for v in projections]

    # Detect the text lines using the projection
    linesCut = []
    line = 0
    noGreenGap = 0
    firstLine = 0
    for i in range(len(projections)):
        if firstLine == 0:
            if projections[i] > .7:
                firstLine = 1
                line = i
                noGreenGap = 0
        else:
            if projections[i] > .7:
                line = i
                noGreenGap = 0
            else:
                noGreenGap += 1
                if noGreenGap == 30:
                    linesCut.append(line)
                    firstLine = 0
    return linesCut, projections, lines2

def projectionInX(img, height, width):
    blur = cv2.GaussianBlur(img,(5,5),0)
    ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    kernel = np.ones((5,5),np.uint8)
    dilation = cv2.dilate(th3,kernel,iterations = 1)
    projection = [0]*width
    for i in range(width):
        for j in range(height):
            if (dilation[j,i] == 255):
                projection[i] += 1
    return projection

def smooth(projection, window_size = 3):
    filtered_projection = []
    for i in range(len(projection)):
        summ = 0
        for j in range(window_size):
            summ += projection[i-j-1]
            summ += projection[(i+j+1)%len(projection)]
        summ /= window_size * 2
        filtered_projection.append(summ)
    return filtered_projection

#--------------------------------------------
#HoughLinesP:
minLineLength = 50
maxLineGap = 10

offset = 5
#--------------------------------------------

img = cv2.imread('1-simpleImage.png')
height, width, _ = img.shape
edges = imgEdges(img)
lines, projection, candidates = detectLines(edges)

#Generate all the cropped lines:
#for i in range(len(lines)):
#    roi = img[lines[i-1] - offset:lines[i] + offset, 0:width]
#    cv2.imwrite(str(i) + ".png", roi)

#Generate a single cropped line:
roi = img[0:lines[0] + offset, 0:width]
cv2.imwrite(str(1) + ".png", roi)

img = cv2.imread("1.png")
height, width, _ = img.shape
edges = imgEdges(img)

projection = projectionInX(edges, height, width)

plt.plot(projection)
plt.show()

#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#edges = cv2.Canny(gray, 50, 100, apertureSize=3)
#cv2.imwrite("edgesImage.png", edges)
#img = cv2.imread("edgesImage.png")
#height, width, null = img.shape
#print ("Computing the projections.")
#projection = [0] * width
#for i in range(width):
#    for j in range(height):
#        if iswhite(img[j,i]):
#            projection[i] += sum(img[j,i]) * j
#print (projection)
#plt.plot(smooth(projection, 1))

def platos(projection, min_size=7, max_difference=10000):
    last = 0
    y = []
    x = []
    for i in range(1, len(projection)):
        if abs(projection[i]-projection[last]) > max_difference:
            if i-last > min_size:
                pos = int((i+last)/2)
                x.append(pos)
                y.append(projection[pos])
            last = i
    return x, y

#x, y = platos(projection)
#print(x,y)
#plt.plot(projection)
#plt.plot(x, y, 'ro')
#plt.show()
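#Sanity check for smooth(): with window_size=1 each point becomes the average
#of its two nearest neighbours (wrapping at the ends).
#toy = [0, 0, 10, 10, 0, 0]
#print(smooth(toy, window_size=1))   # [0.0, 5.0, 5.0, 5.0, 5.0, 0.0]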
null
DetectWords/DetectLinesManyLines.py
DetectLinesManyLines.py
py
4,421
python
en
code
null
code-starcoder2
83
[ { "api_name": "cv2.cvtColor", "line_number": 19, "usage_type": "call" }, { "api_name": "cv2.COLOR_BGR2GRAY", "line_number": 19, "usage_type": "attribute" }, { "api_name": "cv2.Canny", "line_number": 20, "usage_type": "call" }, { "api_name": "cv2.HoughLinesP", "line_number": 27, "usage_type": "call" }, { "api_name": "numpy.pi", "line_number": 27, "usage_type": "attribute" }, { "api_name": "cv2.GaussianBlur", "line_number": 75, "usage_type": "call" }, { "api_name": "cv2.threshold", "line_number": 76, "usage_type": "call" }, { "api_name": "cv2.THRESH_BINARY", "line_number": 76, "usage_type": "attribute" }, { "api_name": "cv2.THRESH_OTSU", "line_number": 76, "usage_type": "attribute" }, { "api_name": "numpy.ones", "line_number": 77, "usage_type": "call" }, { "api_name": "numpy.uint8", "line_number": 77, "usage_type": "attribute" }, { "api_name": "cv2.dilate", "line_number": 78, "usage_type": "call" }, { "api_name": "cv2.imread", "line_number": 103, "usage_type": "call" }, { "api_name": "cv2.imwrite", "line_number": 118, "usage_type": "call" }, { "api_name": "cv2.imread", "line_number": 120, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.plot", "line_number": 126, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.show", "line_number": 127, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name" } ]
587584537
import copy from ..parser import ast from .definitions import Class from .definitions import Function from .utils import CompileError from .utils import GenericType from .utils import make_name from .utils import make_types_string_parts from .utils import mys_to_cpp_type_param from .utils import split_dict_mys_type def replace_generic_types(generic_types, mys_type, chosen_types): if isinstance(mys_type, str): for generic_type, chosen_type in zip(generic_types, chosen_types): if mys_type == generic_type: return chosen_type return mys_type class SpecializeGenericType: def __init__(self, generic_type, chosen_type, node=None): self.generic_type = generic_type self.chosen_type = chosen_type self.node = node def replace(self, mys_type): """Replaces all occurrences of generic types with chosen types. """ if isinstance(mys_type, str): if mys_type == self.generic_type: mys_type = self.chosen_type elif isinstance(mys_type, dict): key_mys_type, value_mys_type = split_dict_mys_type(mys_type) mys_type = {self.replace(key_mys_type): self.replace(value_mys_type)} elif isinstance(mys_type, list): mys_type = [self.replace(mys_type[0])] elif isinstance(mys_type, tuple): mys_type = tuple(self.replace(item_mys_type) for item_mys_type in mys_type) else: raise Exception('generic type not supported') return mys_type class SpecializeTypeTransformer(ast.NodeTransformer): """Traverses given generic node and replaces given generic types with given chosen types. """ def __init__(self, generic_types, chosen_types): self.generic_to_specialized_type = dict(zip(generic_types, chosen_types)) def visit_Name(self, node): node.id = self.generic_to_specialized_type.get(node.id, node.id) return node def specialize_function(function, specialized_full_name, chosen_types, node): """Returns a copy of the function object with all generic types replaced with chosen types. """ returns = function.returns args = copy.deepcopy(function.args) actual_ntypes = len(chosen_types) expected_ntypes = len(function.generic_types) if actual_ntypes != expected_ntypes: raise CompileError( f'expected {expected_ntypes} type, got {actual_ntypes}', node.func.slice) for generic_type, chosen_type in zip(function.generic_types, chosen_types): if returns is not None: returns = SpecializeGenericType(generic_type, chosen_type, function.node).replace(returns) for param, _ in args: param.type = SpecializeGenericType(generic_type, chosen_type, function.node).replace(param.type) node = copy.deepcopy(function.node) node.name = specialized_full_name node = SpecializeTypeTransformer(function.generic_types, chosen_types).visit(node) return Function(specialized_full_name, [], function.raises, function.is_test, args, returns, node, function.module_name) def specialize_class(definitions, specialized_name, chosen_types, node): """Returns a copy of the class object with all generic types replaced with chosen types. 
""" members = copy.deepcopy(definitions.members) methods = copy.deepcopy(definitions.methods) actual_ntypes = len(chosen_types) expected_ntypes = len(definitions.generic_types) if actual_ntypes != expected_ntypes: raise CompileError( f'expected {expected_ntypes} type, got {actual_ntypes}', node.slice) for generic_type, chosen_type in zip(definitions.generic_types, chosen_types): for member in members.values(): member.type = SpecializeGenericType(generic_type, chosen_type).replace(member.type) for class_methods in methods.values(): for method in class_methods: if method.returns is not None: method.returns = SpecializeGenericType( generic_type, chosen_type).replace(method.returns) for param, _node in method.args: param.type = SpecializeGenericType( generic_type, chosen_type).replace(param.type) method.node = SpecializeTypeTransformer( definitions.generic_types, chosen_types).visit(method.node) return Class(specialized_name, [], members, methods, definitions.functions, definitions.implements, definitions.node, definitions.module_name) def add_generic_class(node, context): name = node.value.id full_name = context.make_full_name(name) chosen_types = find_chosen_types(node, context) specialized_name, specialized_full_name = make_generic_name( name, full_name, chosen_types) definitions = context.get_class_definitions(full_name) if context.is_specialized_class_defined(specialized_full_name): specialized_class = context.get_specialized_class( specialized_full_name) else: specialized_class = specialize_class(definitions, specialized_name, chosen_types, node) context.define_specialized_class(specialized_full_name, specialized_class, node) context.define_class(specialized_name, specialized_full_name, specialized_class) return specialized_class, specialized_full_name def make_generic_name(name, full_name, chosen_types): joined_chosen_types = '_'.join(make_types_string_parts(chosen_types)) specialized_name = f'{name}_{joined_chosen_types}' specialized_full_name = f'{full_name}_{joined_chosen_types}' return specialized_name, specialized_full_name def find_chosen_types(node, context): types_slice = node.slice chosen_types = [ TypeVisitor(context).visit(type_node) for type_node in fix_chosen_types(types_slice, context.source_lines) ] return chosen_types def format_parameters(args, context): parameters = [] for param, _ in args: if isinstance(param.type, GenericType): param_type = add_generic_class(param.type.node, context)[1] else: param_type = param.type cpp_type = mys_to_cpp_type_param(param_type, context) parameters.append(f'{cpp_type} {make_name(param.name)}') if parameters: return ', '.join(parameters) else: return 'void' def fix_chosen_types(slice_node, source_lines): """Returns a list of nodes that represents the chosen types. 
""" if isinstance(slice_node, ast.Tuple): first = slice_node.elts[0] second = slice_node.elts[1] if slice_node.lineno != second.lineno: raise Exception('internal error') source = source_lines[slice_node.lineno - 1] opening = source[slice_node.col_offset:first.col_offset] closing = source[first.end_col_offset:second.col_offset] if opening.count('(') > closing.count(')'): types = [slice_node] else: types = slice_node.elts else: types = [slice_node] return types class TypeVisitor(ast.NodeVisitor): def __init__(self, context): self.context = context def visit_Name(self, node): name = node.id if self.context.is_class_defined(name): name = self.context.make_full_name(name) elif self.context.is_enum_defined(name): name = self.context.make_full_name(name) elif self.context.is_trait_defined(name): name = self.context.make_full_name(name) return name def visit_List(self, node): nitems = len(node.elts) if nitems != 1: raise CompileError(f"expected 1 type in list, got {nitems}", node) return [self.visit(elem) for elem in node.elts] def visit_Tuple(self, node): return tuple([self.visit(elem) for elem in node.elts]) def visit_Dict(self, node): return {node.keys[0].id: self.visit(node.values[0])} def visit_Subscript(self, node): return add_generic_class(node, self.context)[1]
null
mys/transpiler/generics.py
generics.py
py
9,088
python
en
code
null
code-starcoder2
83
[ { "api_name": "utils.split_dict_mys_type", "line_number": 39, "usage_type": "call" }, { "api_name": "parser.ast.NodeTransformer", "line_number": 52, "usage_type": "attribute" }, { "api_name": "parser.ast", "line_number": 52, "usage_type": "name" }, { "api_name": "copy.deepcopy", "line_number": 74, "usage_type": "call" }, { "api_name": "utils.CompileError", "line_number": 79, "usage_type": "call" }, { "api_name": "copy.deepcopy", "line_number": 94, "usage_type": "call" }, { "api_name": "definitions.Function", "line_number": 99, "usage_type": "call" }, { "api_name": "copy.deepcopy", "line_number": 115, "usage_type": "call" }, { "api_name": "definitions.members", "line_number": 115, "usage_type": "attribute" }, { "api_name": "copy.deepcopy", "line_number": 116, "usage_type": "call" }, { "api_name": "definitions.methods", "line_number": 116, "usage_type": "attribute" }, { "api_name": "definitions.generic_types", "line_number": 118, "usage_type": "attribute" }, { "api_name": "utils.CompileError", "line_number": 121, "usage_type": "call" }, { "api_name": "definitions.generic_types", "line_number": 125, "usage_type": "attribute" }, { "api_name": "definitions.generic_types", "line_number": 143, "usage_type": "attribute" }, { "api_name": "definitions.Class", "line_number": 146, "usage_type": "call" }, { "api_name": "definitions.functions", "line_number": 150, "usage_type": "attribute" }, { "api_name": "definitions.implements", "line_number": 151, "usage_type": "attribute" }, { "api_name": "definitions.node", "line_number": 152, "usage_type": "attribute" }, { "api_name": "definitions.module_name", "line_number": 153, "usage_type": "attribute" }, { "api_name": "utils.make_types_string_parts", "line_number": 186, "usage_type": "call" }, { "api_name": "utils.GenericType", "line_number": 207, "usage_type": "argument" }, { "api_name": "utils.mys_to_cpp_type_param", "line_number": 212, "usage_type": "call" }, { "api_name": "utils.make_name", "line_number": 213, "usage_type": "call" }, { "api_name": "parser.ast.Tuple", "line_number": 226, "usage_type": "attribute" }, { "api_name": "parser.ast", "line_number": 226, "usage_type": "name" }, { "api_name": "parser.ast.NodeVisitor", "line_number": 247, "usage_type": "attribute" }, { "api_name": "parser.ast", "line_number": 247, "usage_type": "name" }, { "api_name": "utils.CompileError", "line_number": 268, "usage_type": "call" } ]
138964636
import os
import datetime
from collections import namedtuple

SourceResult = namedtuple('SourceResult', ['path', 'size', 'modified'])
"""Named tuple containing a source's path, size, and modified time."""


class Sources:
    """
    File relationship manager for App instances. Source files must be registered, as well as what files they
    generate and what files they depend on to do so.
    """
    def __init__(self, app):
        """
        Initialize a new Sources instance for the given App.

        :param app: App to manage source paths for.
        :type app: pydgeot.app.App
        """
        self.app = app
        self.cursor = self.app.db_cursor
        self.cursor.execute('''
            CREATE TABLE IF NOT EXISTS sources (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                path TEXT NOT NULL,
                size INTEGER NOT NULL,
                modified INTEGER NOT NULL,
                UNIQUE(path))
            ''')
        # File map tables
        self.cursor.execute('''
            CREATE TABLE IF NOT EXISTS source_targets (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                source_id INTEGER NOT NULL,
                path TEXT NOT NULL,
                FOREIGN KEY(source_id) REFERENCES sources(id) ON DELETE CASCADE ON UPDATE CASCADE)
            ''')
        self.cursor.execute('''
            CREATE TABLE IF NOT EXISTS source_dependencies (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                source_id INTEGER NOT NULL,
                dependency_id INTEGER NOT NULL,
                FOREIGN KEY(source_id) REFERENCES sources(id) ON DELETE CASCADE ON UPDATE CASCADE,
                FOREIGN KEY(dependency_id) REFERENCES sources(id) ON DELETE CASCADE ON UPDATE CASCADE)
            ''')

    def _source_result(self, *row):
        """
        Get a SourceResult from a path, size, modified query from the sources table, with the path transformed in
        to a source path.

        :param row: Tuple or Sqlite Row object with at least three elements representing path, size, and modified
                    time, in that order.
        :type row: tuple[str, int, int] | sqlite3.Row
        :return: SourceResult with the path as a source path.
        :rtype: pydgeot.app.sources.SourceResult
        """
        return SourceResult(self.app.source_path(row[0]), row[1], datetime.datetime.fromtimestamp(row[2]))

    def _target_result(self, *row):
        """
        Get a SourceResult from a path query from the sources table, with the path transformed in to a target path.

        :param row: Tuple or Sqlite Row object with at least one element representing path as the first element.
        :type row: tuple[str] | sqlite3.Row
        :return: SourceResult with the path as a target path.
        :rtype: pydgeot.app.sources.SourceResult
        """
        return SourceResult(self.app.target_path(row[0]), None, None)

    def clean(self, paths):
        """
        Delete entries under the given source directories and their subdirectories.

        :param paths: List of content directory paths to delete entries for.
        :type paths: list[str]
        """
        for path in paths:
            regex = self.app.path_regex(path, recursive=True)
            self.cursor.execute('SELECT id FROM sources WHERE path REGEXP ?', (regex, ))
            ids = [result[0] for result in self.cursor.fetchall()]
            if len(ids) > 0:
                id_query = '(' + ','.join('?' * len(ids)) + ')'
                self.cursor.execute('''
                    DELETE FROM source_dependencies
                    WHERE source_id IN {0} OR dependency_id IN {0}
                    '''.format(id_query), (ids + ids))
                self.cursor.execute('DELETE FROM source_targets WHERE source_id IN {0}'.format(id_query), ids)
                self.cursor.execute('DELETE FROM sources WHERE id IN {0}'.format(id_query), ids)

    def add_source(self, source):
        """
        Add a source entry to the database. Updates file information if the entry already exists.

        :param source: Source path to add.
        :type source: str
        :return: The entry's database id.
:rtype: int """ rel = self.app.relative_path(source) try: stats = os.stat(source) size = stats.st_size mtime = stats.st_mtime except FileNotFoundError: size = 0 mtime = 0 self.cursor.execute('SELECT id, size, modified FROM sources WHERE path = ?', (rel, )) result = self.cursor.fetchone() if result is not None: if size != result[1] or mtime != result[2]: self.cursor.execute('UPDATE sources SET size = ?, modified = ? WHERE id = ?', (size, mtime, result[0])) return result[0] self.cursor.execute(''' INSERT INTO sources (path, size, modified) VALUES (?, ?, ?) ''', (rel, size, mtime)) return self.cursor.lastrowid def get_source(self, source): """ Get a SourceResult for the given path. :param source: Source file path. :type source: str :return: SourceResult for the given path, or None if the path does not exist. :rtype: pydgeot.app.sources.SourceResult | None """ rel = self.app.relative_path(source) results = list(self.cursor.execute('SELECT path, size, modified FROM sources WHERE path = ?', (rel, ))) return self._source_result(*results[0]) if len(results) > 0 else None def get_sources(self, source_dir='', recursive=True): """ Get a list SourceResults for sources in the given directory. :param source_dir: Source directory to get files for. :type source_dir: str :param recursive: Return results in subdirectories of source_dir. :type recursive: bool :return: Set of SourceResults. :rtype: set[pydgeot.app.sources.SourceResult] """ regex = self.app.path_regex(source_dir, recursive) results = self.cursor.execute('SELECT path, size, modified FROM sources WHERE path REGEXP ?', (regex, )) return set([self._source_result(*result) for result in results]) def remove_source(self, source): """ Remove a source entry, and any associated source dependencies and target files. :param source: Source file path to remove. :type source: str """ rel = self.app.relative_path(source) self.cursor.execute('SELECT id FROM sources WHERE path = ?', (rel, )) result = self.cursor.fetchone() if result is not None: sid = result[0] self.cursor.execute('DELETE FROM source_targets WHERE source_id = ?', (sid, )) self.cursor.execute('DELETE FROM source_dependencies WHERE source_id = ? OR dependency_id = ?', (sid, sid)) self.cursor.execute('DELETE FROM sources WHERE id = ?', (sid, )) def get_targets(self, source, reverse=False): """ Get a list of target paths that a source path has generated. :param source: Source path to get targets path for. :type source: str :param reverse: Perform a reverse lookup instead. Returning source paths for a given target path. The source argument should be given a target path. :type reverse: bool :return: Set of SourceResults for target paths (where size and modified time will be None.) If reverse is True, a set of SourceResults for source paths. :rtype: set[pydgeot.app.sources.SourceResult] """ rel = self.app.relative_path(source) if reverse: results = self.cursor.execute(''' SELECT s.path, s.size, s.modified FROM source_targets AS st INNER JOIN sources s ON s.id = st.source_id WHERE st.path = ? ''', (rel, )) return set([self._source_result(*result) for result in results]) else: results = self.cursor.execute(''' SELECT st.path FROM source_targets AS st INNER JOIN sources s ON s.id = st.source_id WHERE s.path = ? ''', (rel, )) return set([self._target_result(*result) for result in results]) def set_targets(self, source, values): """ Set target paths for a source path. :param source: Source path to set target paths for. :type source: str :param values: List of target paths. 
        :type values: list[str]
        """
        rel = self.app.relative_path(source)
        self.cursor.execute('''
            DELETE FROM source_targets
            WHERE id IN (
                SELECT st.id
                FROM source_targets st
                INNER JOIN sources s ON s.id = st.source_id
                WHERE s.path = ?)
            ''', (rel, ))
        sid = self.add_source(source)
        self.cursor.executemany('''
            INSERT INTO source_targets
                (source_id, path)
            VALUES (?, ?)
            ''', ([(sid, self.app.relative_path(value)) for value in values]))

    def get_dependencies(self, source, reverse=False, recursive=False):
        """
        Get a list of source paths that a source path depends on to generate. If 'fileA.html' and 'fileB.html' are
        both templates that depend on 'base.html', then:

            get_dependencies('fileA.html') => ['base.html']
            get_dependencies('base.html') => []
            get_dependencies('base.html', reverse=True) => ['fileA.html', 'fileB.html']

        :param source: Source path to get dependency paths for.
        :type source: str
        :param reverse: Perform a reverse lookup instead. Return source paths that depend on the given source path
                        to generate.
        :type reverse: bool
        :param recursive: Include dependencies of dependencies. It's turtles all the way down.
        :type recursive: bool
        :return: Set of SourceResults.
        :rtype: set[pydgeot.app.sources.SourceResult]
        """
        if recursive:
            return self._get_dependencies_recursive(source, reverse)
        rel = self.app.relative_path(source)
        if reverse:
            results = self.cursor.execute('''
                SELECT s.path, s.size, s.modified
                FROM source_dependencies AS sd
                INNER JOIN sources s ON s.id = sd.source_id
                INNER JOIN sources d ON d.id = sd.dependency_id
                WHERE d.path = ?
                ''', (rel, ))
        else:
            results = self.cursor.execute('''
                SELECT d.path, d.size, d.modified
                FROM source_dependencies AS sd
                INNER JOIN sources s ON s.id = sd.source_id
                INNER JOIN sources d ON d.id = sd.dependency_id
                WHERE s.path = ?
                ''', (rel, ))
        return set([self._source_result(*result) for result in results])

    def _get_dependencies_recursive(self, source, reverse, _parent_deps=None):
        """
        Get a list of all dependencies for a file, cascading in dependencies of dependencies.

        :param source: Source path to get dependency paths for.
        :type source: str
        :param reverse: Perform a reverse lookup instead. Return source paths that depend on the given source path
                        to generate.
        :type reverse: bool
        :return: Set of SourceResults.
        :rtype: set[pydgeot.app.sources.SourceResult]
        """
        # Avoid the mutable-default-argument pitfall: create the set per call.
        if _parent_deps is None:
            _parent_deps = set()
        dependencies = self.get_dependencies(source, reverse=reverse)
        for dependency in list(dependencies):
            if dependency not in _parent_deps:
                dependencies |= self._get_dependencies_recursive(dependency.path, reverse, _parent_deps=dependencies)
        return dependencies

    def set_dependencies(self, source, values):
        """
        Set source dependencies for a source path.

        :param source: Source path to set dependency paths for.
        :type source: str
        :param values: List of source dependency paths.
        :type values: list[str]
        """
        sid = self.add_source(source)
        self.cursor.execute('DELETE FROM source_dependencies WHERE source_id = ?', (sid, ))
        value_ids = [self.add_source(value) for value in values]
        self.cursor.executemany('''
            INSERT INTO source_dependencies
                (source_id, dependency_id)
            VALUES (?, ?)
            ''', [(sid, value_id) for value_id in value_ids])
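# Hypothetical round trip, assuming `app` is an initialized pydgeot App whose
# database connection backs `db_cursor` (the paths are made up):
#
#     sources = Sources(app)
#     sources.set_targets('content/index.md', ['build/index.html'])
#     sources.set_dependencies('content/index.md', ['content/base.html'])
#     sources.get_targets('content/index.md')                      # build targets of the page
#     sources.get_dependencies('content/base.html', reverse=True)  # pages using base.html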
null
pydgeot/app/sources.py
sources.py
py
12,939
python
en
code
null
code-starcoder2
83
[ { "api_name": "collections.namedtuple", "line_number": 6, "usage_type": "call" }, { "api_name": "datetime.datetime.fromtimestamp", "line_number": 68, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 68, "usage_type": "attribute" }, { "api_name": "os.stat", "line_number": 114, "usage_type": "call" } ]
309736655
from __future__ import annotations import hashlib from neo3 import vm, contracts, settings from neo3.network import payloads from neo3.core import cryptography from neo3.contracts.interop import register from typing import cast, List def stackitem_to_hash_data(engine: contracts.ApplicationEngine, stack_item: vm.StackItem) -> bytes: if isinstance(stack_item, vm.InteropStackItem): item = stack_item.get_object() if not issubclass(type(item), payloads.IVerifiable): raise ValueError("Invalid type") item = cast(payloads.IVerifiable, item) value = item.get_hash_data(settings.network.magic) elif isinstance(stack_item, vm.NullStackItem): value = engine.script_container.get_hash_data(settings.network.magic) else: value = stack_item.to_array() return value @register("Neo.Crypto.RIPEMD160", 1000000, contracts.native.CallFlags.NONE, True, [vm.StackItem]) def do_ripemd160(engine: contracts.ApplicationEngine, stack_item: vm.StackItem) -> bytes: value = stackitem_to_hash_data(engine, stack_item) return hashlib.new('ripemd160', value).digest() @register("Neo.Crypto.SHA256", 1000000, contracts.native.CallFlags.NONE, True, [vm.StackItem]) def do_sha256(engine: contracts.ApplicationEngine, stack_item: vm.StackItem) -> bytes: value = stackitem_to_hash_data(engine, stack_item) return hashlib.sha256(value).digest() @register("Neo.Crypto.VerifyWithECDsaSecp256r1", 1000000, contracts.native.CallFlags.NONE, True, [vm.StackItem, bytes, bytes]) def verify_with_ECDSA_Secp256r1(engine: contracts.ApplicationEngine, stack_item: vm.StackItem, public_key: bytes, signature: bytes) -> bool: value = stackitem_to_hash_data(engine, stack_item) return cryptography.verify_signature(value, signature, public_key, cryptography.ECCCurve.SECP256R1) @register("Neo.Crypto.VerifyWithECDsaSecp256k1", 1000000, contracts.native.CallFlags.NONE, True, [vm.StackItem, bytes, bytes]) def verify_with_ECDSA_Secp256k1(engine: contracts.ApplicationEngine, stack_item: vm.StackItem, public_key: bytes, signature: bytes) -> bool: value = stackitem_to_hash_data(engine, stack_item) return cryptography.verify_signature(value, signature, public_key, cryptography.ECCCurve.SECP256K1) def _check_multisig(engine: contracts.ApplicationEngine, stack_item: vm.StackItem, public_keys: List[bytes], signatures: List[bytes], curve: cryptography.ECCCurve) -> bool: len_pub_keys = len(public_keys) len_sigs = len(signatures) if len_sigs == 0: raise ValueError("No signatures supplied") if len_pub_keys == 0: raise ValueError("No public keys supplied") if len_sigs > len_pub_keys: raise ValueError(f"Verification requires {len_sigs} public keys, got only {len_pub_keys}") message = stackitem_to_hash_data(engine, stack_item) engine.add_gas(len_pub_keys * 1000000) i = 0 j = 0 try: while i < len_sigs and j < len_pub_keys: if cryptography.verify_signature(message, signatures[i], public_keys[j], curve): i += 1 j += 1 if len_sigs - i > len_pub_keys - j: return False except cryptography.ECCException as e: return False return True @register("Neo.Crypto.CheckMultisigWithECDsaSecp256r1", 0, contracts.native.CallFlags.NONE, True, [vm.StackItem, List[bytes], List[bytes]]) def check_multisig_with_ECDSA_Secp256r1(engine: contracts.ApplicationEngine, stack_item: vm.StackItem, public_keys: List[bytes], signatures: List[bytes]) -> bool: return _check_multisig(engine, stack_item, public_keys, signatures, cryptography.ECCCurve.SECP256R1) @register("Neo.Crypto.CheckMultisigWithECDsaSecp256k1", 0, contracts.native.CallFlags.NONE, True, [vm.StackItem, List[bytes], List[bytes]]) def 
check_multisig_with_ECDSA_Secp256k1(engine: contracts.ApplicationEngine, stack_item: vm.StackItem, public_keys: List[bytes], signatures: List[bytes]) -> bool: return _check_multisig(engine, stack_item, public_keys, signatures, cryptography.ECCCurve.SECP256K1)
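# The greedy signature/key pairing in _check_multisig, shown in isolation with
# a stubbed verifier. This sketch is behaviourally equivalent to the loop
# above, since the early return fires whenever fewer keys than signatures
# remain, so the loop can only finish once every signature has matched.
if __name__ == '__main__':
    def greedy_multisig(verify, message, signatures, public_keys):
        i = j = 0
        while i < len(signatures) and j < len(public_keys):
            if verify(message, signatures[i], public_keys[j]):
                i += 1                                    # signature matched, advance it
            j += 1                                        # keys always advance
            if len(signatures) - i > len(public_keys) - j:
                return False                              # too few keys left
        return i == len(signatures)

    # Toy verifier: a "signature" is valid iff it equals the key.
    print(greedy_multisig(lambda m, s, k: s == k, b'msg', ['a', 'c'], ['a', 'b', 'c']))  # True
    print(greedy_multisig(lambda m, s, k: s == k, b'msg', ['c', 'a'], ['a', 'b', 'c']))  # False: order matters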
null
neo3/contracts/interop/crypto.py
crypto.py
py
4,640
python
en
code
null
code-starcoder2
83
[ { "api_name": "neo3.contracts.ApplicationEngine", "line_number": 10, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 10, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 10, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 10, "usage_type": "name" }, { "api_name": "neo3.vm.InteropStackItem", "line_number": 11, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 11, "usage_type": "name" }, { "api_name": "neo3.network.payloads.IVerifiable", "line_number": 13, "usage_type": "attribute" }, { "api_name": "neo3.network.payloads", "line_number": 13, "usage_type": "name" }, { "api_name": "typing.cast", "line_number": 15, "usage_type": "call" }, { "api_name": "neo3.network.payloads.IVerifiable", "line_number": 15, "usage_type": "attribute" }, { "api_name": "neo3.network.payloads", "line_number": 15, "usage_type": "name" }, { "api_name": "neo3.settings.network", "line_number": 16, "usage_type": "attribute" }, { "api_name": "neo3.settings", "line_number": 16, "usage_type": "name" }, { "api_name": "neo3.vm.NullStackItem", "line_number": 17, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 17, "usage_type": "name" }, { "api_name": "neo3.settings.network", "line_number": 18, "usage_type": "attribute" }, { "api_name": "neo3.settings", "line_number": 18, "usage_type": "name" }, { "api_name": "neo3.contracts.ApplicationEngine", "line_number": 25, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 25, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 25, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 25, "usage_type": "name" }, { "api_name": "hashlib.new", "line_number": 27, "usage_type": "call" }, { "api_name": "neo3.contracts.interop.register", "line_number": 24, "usage_type": "call" }, { "api_name": "neo3.contracts.native", "line_number": 24, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 24, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 24, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 24, "usage_type": "name" }, { "api_name": "neo3.contracts.ApplicationEngine", "line_number": 31, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 31, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 31, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 31, "usage_type": "name" }, { "api_name": "hashlib.sha256", "line_number": 33, "usage_type": "call" }, { "api_name": "neo3.contracts.interop.register", "line_number": 30, "usage_type": "call" }, { "api_name": "neo3.contracts.native", "line_number": 30, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 30, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 30, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 30, "usage_type": "name" }, { "api_name": "neo3.contracts.ApplicationEngine", "line_number": 38, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 38, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 39, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 39, "usage_type": "name" }, { "api_name": "neo3.core.cryptography.verify_signature", "line_number": 43, "usage_type": "call" }, { "api_name": "neo3.core.cryptography", "line_number": 43, "usage_type": "name" }, { "api_name": 
"neo3.core.cryptography.ECCCurve", "line_number": 43, "usage_type": "attribute" }, { "api_name": "neo3.contracts.interop.register", "line_number": 36, "usage_type": "call" }, { "api_name": "neo3.contracts.native", "line_number": 36, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 36, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 37, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 37, "usage_type": "name" }, { "api_name": "neo3.contracts.ApplicationEngine", "line_number": 48, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 48, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 49, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 49, "usage_type": "name" }, { "api_name": "neo3.core.cryptography.verify_signature", "line_number": 53, "usage_type": "call" }, { "api_name": "neo3.core.cryptography", "line_number": 53, "usage_type": "name" }, { "api_name": "neo3.core.cryptography.ECCCurve", "line_number": 53, "usage_type": "attribute" }, { "api_name": "neo3.contracts.interop.register", "line_number": 46, "usage_type": "call" }, { "api_name": "neo3.contracts.native", "line_number": 46, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 46, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 47, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 47, "usage_type": "name" }, { "api_name": "neo3.contracts.ApplicationEngine", "line_number": 56, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 56, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 57, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 57, "usage_type": "name" }, { "api_name": "typing.List", "line_number": 58, "usage_type": "name" }, { "api_name": "typing.List", "line_number": 59, "usage_type": "name" }, { "api_name": "neo3.core.cryptography.ECCCurve", "line_number": 60, "usage_type": "attribute" }, { "api_name": "neo3.core.cryptography", "line_number": 60, "usage_type": "name" }, { "api_name": "neo3.core.cryptography.verify_signature", "line_number": 78, "usage_type": "call" }, { "api_name": "neo3.core.cryptography", "line_number": 78, "usage_type": "name" }, { "api_name": "neo3.core.cryptography.ECCException", "line_number": 84, "usage_type": "attribute" }, { "api_name": "neo3.core.cryptography", "line_number": 84, "usage_type": "name" }, { "api_name": "neo3.contracts.ApplicationEngine", "line_number": 91, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 91, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 92, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 92, "usage_type": "name" }, { "api_name": "typing.List", "line_number": 93, "usage_type": "name" }, { "api_name": "typing.List", "line_number": 94, "usage_type": "name" }, { "api_name": "neo3.core.cryptography.ECCCurve", "line_number": 95, "usage_type": "attribute" }, { "api_name": "neo3.core.cryptography", "line_number": 95, "usage_type": "name" }, { "api_name": "neo3.contracts.interop.register", "line_number": 89, "usage_type": "call" }, { "api_name": "neo3.contracts.native", "line_number": 89, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 89, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 90, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 90, "usage_type": 
"name" }, { "api_name": "typing.List", "line_number": 90, "usage_type": "name" }, { "api_name": "neo3.contracts.ApplicationEngine", "line_number": 100, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 100, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 101, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 101, "usage_type": "name" }, { "api_name": "typing.List", "line_number": 102, "usage_type": "name" }, { "api_name": "typing.List", "line_number": 103, "usage_type": "name" }, { "api_name": "neo3.core.cryptography.ECCCurve", "line_number": 104, "usage_type": "attribute" }, { "api_name": "neo3.core.cryptography", "line_number": 104, "usage_type": "name" }, { "api_name": "neo3.contracts.interop.register", "line_number": 98, "usage_type": "call" }, { "api_name": "neo3.contracts.native", "line_number": 98, "usage_type": "attribute" }, { "api_name": "neo3.contracts", "line_number": 98, "usage_type": "name" }, { "api_name": "neo3.vm.StackItem", "line_number": 99, "usage_type": "attribute" }, { "api_name": "neo3.vm", "line_number": 99, "usage_type": "name" }, { "api_name": "typing.List", "line_number": 99, "usage_type": "name" } ]
240252817
from . import ForecastMixin from datetime import timedelta, datetime import xml.etree.ElementTree as ET class Forecast(ForecastMixin): url = 'https://geoservices.atmosud.org/geoserver/ind_sudpaca/ows?service=WFS&version=1.1.0' fr_date_format = '%d-%m-%Y 00:00:00' @classmethod def params(cls, date, insee): parsed_date = datetime.strptime(date, '%Y-%m-%d') tomorrow_date = parsed_date + timedelta(days=1) fr_date = parsed_date.strftime(cls.fr_date_format) fr_tomorrow = tomorrow_date.strftime(cls.fr_date_format) return { 'service': 'WFS', 'version': '1.1.0', 'request': 'GetFeature', 'typeName': 'ind_sudpaca:ind_sudpaca_agglo', 'CQL_FILTER': f"code_zone='{insee}' AND (date_ech='{fr_date}' OR date_ech='{fr_tomorrow}')" } @classmethod def features(cls, r): root = ET.fromstring(r.text) return filter(lambda el: el.tag == '{http://ind_sudpaca}ind_sudpaca_agglo', root[0]) @classmethod def getter(cls, feature): feature_dict = {f.tag: f.text for f in feature} return { 'indice': feature_dict['{http://ind_sudpaca}valeur'], 'date': datetime.strptime( feature_dict['{http://ind_sudpaca}date_ech'], cls.fr_date_format ).strftime('%Y-%m-%d') } @classmethod def insee_list(cls): return ['06029', '06088', '13001', '13055', '83137', '84007']
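# Sketch of the WFS query parameters this class builds (the insee code comes
# from its own insee_list; the date is arbitrary):
if __name__ == '__main__':
    print(Forecast.params('2021-05-01', '13055'))
    # CQL_FILTER selects code_zone '13055' for 01-05-2021 and 02-05-2021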
null
indice_pollution/regions/Sud.py
Sud.py
py
1,500
python
en
code
null
code-starcoder2
83
[ { "api_name": "datetime.datetime.strptime", "line_number": 11, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 11, "usage_type": "name" }, { "api_name": "datetime.timedelta", "line_number": 12, "usage_type": "call" }, { "api_name": "xml.etree.ElementTree.fromstring", "line_number": 27, "usage_type": "call" }, { "api_name": "xml.etree.ElementTree", "line_number": 27, "usage_type": "name" }, { "api_name": "datetime.datetime.strptime", "line_number": 37, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 37, "usage_type": "name" } ]
277185451
import numpy as np
from sklearn.datasets import load_iris
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

dataset = load_iris()
print(dataset.DESCR)
print(dataset.feature_names)

x = dataset.data
y = dataset.target
print(x.shape , y.shape)

'''
from tensorflow.keras.utils import to_categorical
y = to_categorical(y)

Classic machine-learning estimators do not need one-hot encoding applied to y.
ValueError: y should be a 1d array, got an array of shape (142, 3) instead.
The estimator expects a 1-D array but received a 2-D one because of the one-hot encoding.
Most scikit-learn models take y as a 1-D array, so one-hot encoding raises this error;
there is no need to encode y separately.
'''

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y, train_size = 0.95, random_state=66)

from sklearn.preprocessing import MinMaxScaler, StandardScaler
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

from sklearn.svm import LinearSVC # support vector machine family; LinearSVC separates classes with a linear boundary

model = LinearSVC()

model.fit(x_train,y_train)

from sklearn.metrics import r2_score,accuracy_score # r2 evaluates regression models, accuracy evaluates classification models
y_predic = model.predict(x_test)
acc = accuracy_score(y_test,y_predic) # matches deep-learning accuracy: evaluate() also predicts on x_test and compares with y_test
print('acc : ',acc)

results = model.score(x_test, y_test)
print('results : ',results)

y_predic2 = model.predict(x_test[:5])
print('y_predic : ',y_predic)

'''
After applying one-hot encoding (deep-learning run):
Epoch 00166: early stopping
1/1 [==============================] - 0s 13ms/step - loss: 1.4669e-04 - accuracy: 1.0000
loss :  0.00014668621588498354
accuracy :  1.0

loss :  0.0
accuracy :  0.625

ML
results :  1.0
y_predic :  [1 1 1 0 1]

acc :  1.0
results :  1.0
y_predic :  [1 1 1 0 1 1 0 0]
'''
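# If y had been one-hot encoded upstream, argmax recovers the 1-D labels that
# scikit-learn expects; a minimal sketch:
y_onehot = np.array([[1, 0, 0], [0, 0, 1]])
print(np.argmax(y_onehot, axis=1))  # [0 2]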
null
ML/m01.py
m01.py
py
1,931
python
en
code
null
code-starcoder2
83
[ { "api_name": "sklearn.datasets.load_iris", "line_number": 5, "usage_type": "call" }, { "api_name": "sklearn.model_selection.train_test_split", "line_number": 22, "usage_type": "call" }, { "api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 26, "usage_type": "call" }, { "api_name": "sklearn.svm.LinearSVC", "line_number": 32, "usage_type": "call" }, { "api_name": "sklearn.metrics.accuracy_score", "line_number": 37, "usage_type": "call" } ]
579980692
#!/usr/bin/python

from pymatgen.util.testing import PymatgenTest
from pymatgen.core.operations import SymmOp
import numpy as np


class SymmOpTestCase(PymatgenTest):

    def setUp(self):
        self.op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False,
                                                         [0, 0, 1])

    def test_properties(self):
        rot = self.op.rotation_matrix
        vec = self.op.translation_vector
        self.assertArrayAlmostEqual(rot, [[0.8660254, -0.5, 0.],
                                          [0.5, 0.8660254, 0.],
                                          [0., 0., 1.]], 2)
        self.assertArrayAlmostEqual(vec, [0, 0, 1], 2)

    def test_operate(self):
        point = np.array([1, 2, 3])
        newcoord = self.op.operate(point)
        self.assertArrayAlmostEqual(newcoord, [-0.1339746, 2.23205081, 4.], 2)

    def test_inverse(self):
        point = np.random.rand(3)
        newcoord = self.op.operate(point)
        self.assertArrayAlmostEqual(self.op.inverse.operate(newcoord), point, 2)

    def test_reflection(self):
        normal = np.random.rand(3)
        origin = np.random.rand(3)
        refl = SymmOp.reflection(normal, origin)
        point = np.random.rand(3)
        newcoord = refl.operate(point)
        # Distances to the plane should be negatives of each other.
        self.assertAlmostEqual(np.dot(newcoord - origin, normal),
                               -np.dot(point - origin, normal))

    def test_apply_rotation_only(self):
        point = np.random.rand(3)
        newcoord = self.op.operate(point)
        rotate_only = self.op.apply_rotation_only(point)
        self.assertArrayAlmostEqual(
            rotate_only + self.op.translation_vector, newcoord, 2)

    def test_are_symmetrically_related(self):
        point = np.random.rand(3)
        newcoord = self.op.operate(point)
        self.assertTrue(self.op.are_symmetrically_related(point, newcoord))
        self.assertTrue(self.op.are_symmetrically_related(newcoord, point))

    def test_to_from_dict(self):
        d = self.op.to_dict
        op = SymmOp.from_dict(d)
        point = np.random.rand(3)
        newcoord = self.op.operate(point)
        self.assertTrue(op.are_symmetrically_related(point, newcoord))

    def test_inversion(self):
        origin = np.random.rand(3)
        op = SymmOp.inversion(origin)
        pt = np.random.rand(3)
        inv_pt = op.operate(pt)
        self.assertArrayAlmostEqual(pt - origin, origin - inv_pt)


if __name__ == '__main__':
    import unittest
    unittest.main()
null
pymatgen/core/tests/test_operations.py
test_operations.py
py
2,616
python
en
code
null
code-starcoder2
83
[ { "api_name": "pymatgen.util.testing.PymatgenTest", "line_number": 8, "usage_type": "name" }, { "api_name": "pymatgen.core.operations.SymmOp.from_axis_angle_and_translation", "line_number": 11, "usage_type": "call" }, { "api_name": "pymatgen.core.operations.SymmOp", "line_number": 11, "usage_type": "name" }, { "api_name": "numpy.array", "line_number": 23, "usage_type": "call" }, { "api_name": "numpy.random.rand", "line_number": 28, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 28, "usage_type": "attribute" }, { "api_name": "numpy.random.rand", "line_number": 34, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 34, "usage_type": "attribute" }, { "api_name": "numpy.random.rand", "line_number": 35, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 35, "usage_type": "attribute" }, { "api_name": "pymatgen.core.operations.SymmOp.reflection", "line_number": 36, "usage_type": "call" }, { "api_name": "pymatgen.core.operations.SymmOp", "line_number": 36, "usage_type": "name" }, { "api_name": "numpy.random.rand", "line_number": 37, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 37, "usage_type": "attribute" }, { "api_name": "numpy.dot", "line_number": 40, "usage_type": "call" }, { "api_name": "numpy.dot", "line_number": 41, "usage_type": "call" }, { "api_name": "numpy.random.rand", "line_number": 45, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 45, "usage_type": "attribute" }, { "api_name": "numpy.random.rand", "line_number": 52, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 52, "usage_type": "attribute" }, { "api_name": "pymatgen.core.operations.SymmOp.from_dict", "line_number": 59, "usage_type": "call" }, { "api_name": "pymatgen.core.operations.SymmOp", "line_number": 59, "usage_type": "name" }, { "api_name": "numpy.random.rand", "line_number": 60, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 60, "usage_type": "attribute" }, { "api_name": "numpy.random.rand", "line_number": 65, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 65, "usage_type": "attribute" }, { "api_name": "pymatgen.core.operations.SymmOp.inversion", "line_number": 66, "usage_type": "call" }, { "api_name": "pymatgen.core.operations.SymmOp", "line_number": 66, "usage_type": "name" }, { "api_name": "numpy.random.rand", "line_number": 67, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 67, "usage_type": "attribute" }, { "api_name": "unittest.main", "line_number": 73, "usage_type": "call" } ]
114038595
from math import sqrt

from scipy.stats import t

from math_helper.mean import mean
from math_helper.variance import variance


def ttest(column, population_mean):
    """Calculates a two-tailed one-sample t-test.

    Args:
        column: column to test
        population_mean: the mean to test against

    Returns:
        A list of test statistics
    """
    data = column['data']

    data_mean = mean(data)
    data_var = variance(data)
    observations = len(data)

    # Standard error of the mean: sqrt(s^2 / n)
    standard_err = sqrt(data_var / observations)
    degrees_of_freedom = observations - 1

    t_statistic = (data_mean - population_mean) / standard_err

    # Two-tailed p-value: double the tail probability on the relevant side.
    p_stat = t.cdf(t_statistic, degrees_of_freedom)
    p_value = (1 - p_stat) * 2
    if data_mean < population_mean:
        p_value = p_stat * 2

    return [
        data_mean,
        population_mean,
        t_statistic,
        p_value,
        degrees_of_freedom
    ]
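# --- Usage sketch (editor's addition, not in the original file) ---
# Assumes math_helper.mean/variance implement the usual sample mean and
# variance; the numbers below are made up. This only shows the call shape:
# the input is a dict with a 'data' key, and the result unpacks as
# [mean, population_mean, t, p, df].
if __name__ == '__main__':
    column = {'data': [5.1, 4.9, 5.4, 5.0, 5.2]}
    data_mean, mu, t_stat, p_value, df = ttest(column, population_mean=5.0)
    print(t_stat, p_value, df)  # df == 4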
null
src/analyses/ttest.py
ttest.py
py
916
python
en
code
null
code-starcoder2
83
[ { "api_name": "math_helper.mean.mean", "line_number": 21, "usage_type": "call" }, { "api_name": "math_helper.variance.variance", "line_number": 22, "usage_type": "call" }, { "api_name": "math.sqrt", "line_number": 24, "usage_type": "call" }, { "api_name": "scipy.stats.t.cdf", "line_number": 28, "usage_type": "call" }, { "api_name": "scipy.stats.t", "line_number": 28, "usage_type": "name" } ]
637120121
#!/usr/bin/python3
"""
Sample usage:
$ TEGAKI_API_KEY='xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' python delete_request.py request_id
"""
import os
import sys

import requests

TEGAKI_DELETE_REQUEST_ENDPOINT = ('delete', 'https://api.tegaki.ai/hwr/v2/request')

MY_API_KEY = os.getenv('TEGAKI_API_KEY')


# delete results from a completed request using the id
def delete_result(request_id):
    # Build the endpoint with the request id
    method, endpoint = TEGAKI_DELETE_REQUEST_ENDPOINT
    endpoint += "/{}".format(request_id)

    # Send DELETE request
    response = requests.request(method,
                                endpoint,
                                headers={'Authorization': 'apikey {}'.format(MY_API_KEY)})

    return response.json()


if __name__ == '__main__':
    request_id, *_ = sys.argv[1:]
    delete_result(request_id)
null
delete_request.py
delete_request.py
py
820
python
en
code
null
code-starcoder2
83
[ { "api_name": "os.getenv", "line_number": 14, "usage_type": "call" }, { "api_name": "requests.request", "line_number": 24, "usage_type": "call" }, { "api_name": "sys.argv", "line_number": 30, "usage_type": "attribute" } ]
608133939
import pandas as pd
import numpy as np
from plotly.subplots import make_subplots
import plotly.graph_objects as go


def conversion(path):
    """
    Converts a tij.dat file into a np.array.
    :param path: path of the tij.dat file
    :type path: str
    :return: np.array of the tij data
    :rtype: np.array
    """
    df = pd.read_csv(path, sep='\t')
    tij_array = df.to_numpy()
    return tij_array


def unique(ar):
    """
    This function gives each unique value of an array and the number of occurrences of the value
    :param ar: Array that is studied
    :type ar: np.array
    :return: Unique values of ar and the number of occurrences
    :rtype: tuple of np.array
    """
    values, counts = np.unique(ar, return_counts=True)
    return values, counts


def common(ar1, ar2):
    """
    This function returns the common rows of ar1 and ar2
    :param ar1: First array
    :type ar1: np.array
    :param ar2: Second array
    :type ar2: np.array
    :return: array of common rows
    :rtype: np.array
    """
    common_array = np.array([x for x in set(tuple(x) for x in ar1) & set(tuple(x) for x in ar2)])
    return common_array


def lost(ar1, ar2):
    """
    This function finds the rows that are in ar1 but not in ar2. These rows are called the lost rows.
    :param ar1: First array
    :type ar1: np.array
    :param ar2: Second array
    :type ar2: np.array
    :return: array of the lost rows
    :rtype: np.array
    """
    set1 = {tuple(x) for x in ar1}
    set2 = {tuple(x) for x in ar2}
    lost_set = (set1 ^ set2) & set1
    if len(lost_set) != 0:
        lost_array = np.array(list(lost_set))
    else:
        lost_array = np.empty((0, 2), dtype=int)
    return lost_array


def new(ar1, ar2):
    """
    This function finds the rows that are in ar2 but not in ar1. These rows are called the new rows.
    :param ar1: First array
    :type ar1: np.array
    :param ar2: Second array
    :type ar2: np.array
    :return: array of the new rows
    :rtype: np.array
    """
    set1 = {tuple(x) for x in ar1}
    set2 = {tuple(x) for x in ar2}
    new_set = set2 - set1
    if len(new_set) != 0:
        new_array = np.array(list(new_set))
    else:
        new_array = np.empty((0, 2), dtype=int)
    return new_array


def add_time(time, couples, timeline_array):
    """
    This function appends `time` to the timeline of every (i, j) couple in `couples`.
    :param time: time stamp to append
    :param couples: iterable of (i, j) index couples
    :param timeline_array: array of timelines to update
    :return: the updated timeline array
    """
    for elt in couples:
        i = elt[0]
        j = elt[1]
        if i < j:
            timeline_array[i, j].append(time)
        else:
            timeline_array[j, i].append(time)
    return timeline_array


def timeline(tij_array, dt):
    """
    This function returns an array of timelines of interactions between all the particles. A timeline between
    particle i and j has the following form [t1, t2, t3, t4 ...] with all the odd elements the time of the beginning
    of an interaction and all the even elements the time of the end of an interaction. As the interaction between
    i and j is strictly the same as the interaction between j and i the array should be symmetric, with all the
    diagonal elements equal to 0 (no interaction between i and i). In our case the array is strictly upper
    triangular (no need to keep in memory all the elements).
    :param tij_array: Array of the tij elements, that are needed to create the timeline array
    :type tij_array: np.array
    :param dt: Increment of time for each step
    :type dt: float or int
    :return: Array of timelines.
    :rtype: np.array of lists
    """
    time_array, counts = unique(tij_array[:, 0])
    ij_array = tij_array[:, 1:]
    ij_array = np.int64(ij_array)
    i_min = np.min(ij_array)
    i_max = np.max(ij_array)
    ij_array = ij_array - i_min
    timeline_size = (i_max - i_min + 1,) * 2
    timeline_array = np.frompyfunc(list, 0, 1)(np.empty(timeline_size, dtype=object))
    count = counts[0]
    couples = ij_array[0:count]
    old_time = time_array[0]
    timeline_array = add_time(old_time, couples, timeline_array)

    for step, time in enumerate(time_array[1:]):
        if time - old_time > dt:
            timeline_array = add_time(old_time + dt, couples, timeline_array)
            couples = []
        new_count = count + counts[step + 1]
        couples1 = ij_array[count: new_count, :]
        new_couples = new(couples, couples1)
        lost_couples = lost(couples, couples1)
        if new_couples.size > 0:
            timeline_array = add_time(time, new_couples, timeline_array)
        if lost_couples.size > 0:
            timeline_array = add_time(old_time + dt, lost_couples, timeline_array)
        couples = couples1
        count = new_count
        old_time = time

    return timeline_array


def quantities_calculator(timeline_array, dec=1):
    """
    Calculates 4 different quantities - contact time, inter-contact time, number of contacts and weight - that are
    needed to compare and validate different models with real data.
    :param timeline_array: Array of timelines.
    :type timeline_array: np.array of lists
    :param dec: decimals to which we round the quantities. Default is equal to 1
    :type dec: int, optional
    """
    contact_time_array = []
    inter_contact_time_array = []
    number_contact_array = []
    link_weight_array = []

    for elt in timeline_array:
        for elt1 in elt:
            if len(elt1) % 2 == 1:
                elt1.pop()
            if len(elt1) > 0:
                number_contact_array.append(len(elt1) // 2)
                contact_time = [b - a for a, b in tuple(zip(elt1, elt1[1:]))[::2]]
                contact_time_array.extend(contact_time)
                link_weight_array.append(sum(contact_time))
                inter_contact_time = [b - a for a, b in tuple(zip(elt1[1:], elt1[2:]))[::2]]
                inter_contact_time_array.extend(inter_contact_time)

    contact_time_array, inter_contact_time_array = np.array(contact_time_array), np.array(inter_contact_time_array)
    number_contact_array, link_weight_array = np.array(number_contact_array, dtype=int), np.array(link_weight_array)
    contact_time_array = np.around(contact_time_array, decimals=dec)
    inter_contact_time_array = np.around(inter_contact_time_array, decimals=dec)
    link_weight_array = np.around(link_weight_array, decimals=dec)

    return contact_time_array, inter_contact_time_array, number_contact_array, link_weight_array


def regroup_data(ar):
    """
    This function regroups the quantities with the same value and calculates the number of occurrences of the value.
    The results are then put in an array where for all i, the first element of row i is value i and the second
    element of row i is its number of occurrences.
    :param ar: Array of all the values, of shape (n, )
    :type ar: np.array
    :return: array of shape (n', 2) of values and counts
    :rtype: np.array
    """
    values, counts = unique(ar)
    return np.concatenate((values.reshape((-1, 1)), counts.reshape((-1, 1))), axis=1)


def representation(quantities, title, scale='linear'):
    """
    Represents 4 different quantities - contact time, inter-contact time, number of contacts and weight - in
    histograms.
    :param quantities: tuple of the 4 quantities that are represented
    :type quantities: tuple of np.arrays
    :param title: Title of the figure
    :type title: str
    :param scale: Scale of the plot.
        Can be 'linear' (default), 'log' or 'semi-log'
    :type scale: str, optional
    """
    fig = make_subplots(rows=2, cols=2)
    index = [[1, 1], [1, 2], [2, 1], [2, 2]]

    if scale == 'log':
        scale_x, scale_y = scale, scale
    elif scale == 'linear':
        scale_x, scale_y = scale, scale
    else:
        scale_x, scale_y = 'linear', 'log'

    # Update xaxis properties
    fig.update_xaxes(title_text="Contact duration", type=scale_x, row=1, col=1)
    fig.update_xaxes(title_text="Intercontact duration", type=scale_x, row=1, col=2)
    fig.update_xaxes(title_text="Number of contacts", type=scale_x, row=2, col=1)
    fig.update_xaxes(title_text="weight", type=scale_x, row=2, col=2)

    # Update yaxis properties
    fig.update_yaxes(title_text="Distribution of contact duration", type=scale_y, row=1, col=1)
    fig.update_yaxes(title_text="Distribution of intercontact duration", type=scale_y, row=1, col=2)
    fig.update_yaxes(title_text="Distribution of number of contacts", type=scale_y, row=2, col=1)
    fig.update_yaxes(title_text="Distribution of weight", type=scale_y, row=2, col=2)

    for i, data in enumerate(quantities):
        a = index[i][0]
        b = index[i][1]
        if scale == 'log':
            counts, bins = np.histogram(data, bins=np.logspace(np.log10(np.min(data - 0.5)),
                                                               np.log10(np.max(data + 0.5))), density=True)
        else:
            counts, bins = np.histogram(data, bins='auto', density=True)
        bins = 0.5 * (bins[:-1] + bins[1:])
        fig.add_trace(go.Scatter(x=bins, y=counts, mode='markers', showlegend=False), row=a, col=b)

    fig.show()


def make_hist(quantities, title, scale='linear'):
    """
    Represents 4 different quantities - contact time, inter-contact time, number of contacts and weight - in
    histograms.
    :param quantities: tuple of the 4 quantities that are represented
    :type quantities: tuple of np.arrays
    :param title: Title of the figure
    :type title: str
    :param scale: Scale of the plot.
        Can be 'linear' (default), 'log' or 'semi-log'
    :type scale: str, optional
    """
    fig = make_subplots(rows=2, cols=2)
    index = [[1, 1], [1, 2], [2, 1], [2, 2]]

    if scale == 'log':
        scale_x, scale_y = scale, scale
    elif scale == 'linear':
        scale_x, scale_y = scale, scale
    else:
        scale_x, scale_y = 'linear', 'log'

    # Update x axis properties
    fig.update_xaxes(title_text="Contact duration", type=scale_x, row=1, col=1)
    fig.update_xaxes(title_text="Inter contact duration", type=scale_x, row=1, col=2)
    fig.update_xaxes(title_text="Number of contacts", type=scale_x, row=2, col=1)
    fig.update_xaxes(title_text="weight", type=scale_x, row=2, col=2)

    # Update y axis properties
    fig.update_yaxes(title_text="Contact duration distribution", type=scale_y, row=1, col=1)
    fig.update_yaxes(title_text="Inter contact duration distribution", type=scale_y, row=1, col=2)
    fig.update_yaxes(title_text="Number of contacts distribution", type=scale_y, row=2, col=1)
    fig.update_yaxes(title_text="Weight distribution", type=scale_y, row=2, col=2)

    for i, data in enumerate(quantities):
        a = index[i][0]
        b = index[i][1]
        if scale == 'log':
            counts, bins = np.histogram(data, bins=np.logspace(np.log10(min(data - 0.5)),
                                                               np.log10(max(data + 0.5))), density=True)
        else:
            counts, bins = np.histogram(data, bins='auto', density=True)
        bins = 0.5 * (bins[:-1] + bins[1:])
        fig.add_trace(go.Histogram(x=bins, y=counts, showlegend=False), row=a, col=b)

    fig.show()


def compare_quantities(quantities_array, label_array, title='Comparison tij data', scale='linear'):
    fig = make_subplots(rows=2, cols=2)
    index = [[1, 1], [1, 2], [2, 1], [2, 2]]
    colors = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)', 'rgb(44, 160, 44)', 'rgb(214, 39, 40)', 'rgb(148, 103, 189)',
              'rgb(140, 86, 75)', 'rgb(227, 119, 194)', 'rgb(127, 127, 127)', 'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
    markers = ['star-triangle-up', 'circle', 'x', 'diamond']

    if scale == 'log':
        scale_x, scale_y = scale, scale
    elif scale == 'linear':
        scale_x, scale_y = scale, scale
    else:
        scale_x, scale_y = 'linear', 'log'

    # Update xaxis properties
    fig.update_xaxes(title_text="Contact duration", type=scale_x, row=1, col=1)
    fig.update_xaxes(title_text="Intercontact duration", type=scale_x, row=1, col=2)
    fig.update_xaxes(title_text="Number of contacts", type=scale_x, row=2, col=1)
    fig.update_xaxes(title_text="weight", type=scale_x, row=2, col=2)

    # Update yaxis properties
    fig.update_yaxes(title_text="Contact duration distribution", type=scale_y, row=1, col=1)
    fig.update_yaxes(title_text="Inter contact duration distribution", type=scale_y, row=1, col=2)
    fig.update_yaxes(title_text="Number of contacts distribution", type=scale_y, row=2, col=1)
    fig.update_yaxes(title_text="Weight distribution", type=scale_y, row=2, col=2)

    for j, data in reversed(list(enumerate(quantities_array))):
        data_label = label_array[j]
        for i in range(4):
            a = index[i][0]
            b = index[i][1]
            data = quantities_array[j][i]
            if scale == 'log':
                counts, bins = np.histogram(data, bins=np.logspace(np.log10(np.min(data - 0.5)),
                                                                   np.log10(np.max(data + 0.5))), density=True)
            else:
                counts, bins = np.histogram(data, bins='auto', density=True)
            bins = np.array([(elt + bins[i + 1]) / 2 for i, elt in enumerate(bins[:-1])])
            non_null_index = np.where(counts != 0)[0]
            bins, counts = bins[non_null_index], counts[non_null_index]
            if j == 0:
                if i == 0:
                    fig.add_trace(
                        go.Scatter(x=bins, y=counts, mode='lines', marker={'color': colors[j]}, fillcolor=colors[j],
                                   name=data_label), row=a, col=b)
                else:
                    fig.add_trace(
                        go.Scatter(x=bins, y=counts, mode='lines', marker={'color': colors[j]}, fillcolor=colors[j],
                                   name=data_label, showlegend=False), row=a, col=b)
            else:
                if i == 0:
                    fig.add_trace(go.Scatter(x=bins, y=counts, marker={'color': colors[j], 'symbol': markers[j - 1]},
                                             name=data_label, mode='markers'), row=a, col=b)
                else:
                    fig.add_trace(go.Scatter(x=bins, y=counts, marker={'color': colors[j], 'symbol': markers[j - 1]},
                                             name=data_label, mode='markers', showlegend=False), row=a, col=b)

    fig.show()
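# --- Usage sketch (editor's addition, not in the original file) ---
# A tiny synthetic (t, i, j) record set sampled every dt = 0.2: agents 1 and 2
# are in contact for the first two steps, then agent 1 meets agent 3. The
# values are made up; this only shows the intended pipeline:
# timeline() -> quantities_calculator().
if __name__ == '__main__':
    tij_demo = np.array([
        [0.0, 1, 2],
        [0.2, 1, 2],
        [0.4, 1, 3],
    ])
    tl = timeline(tij_demo, dt=0.2)
    contacts, inter_contacts, n_contacts, weights = quantities_calculator(tl)
    print(contacts, inter_contacts, n_contacts, weights)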
null
tij.py
tij.py
py
14,440
python
en
code
null
code-starcoder2
83
[ { "api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call" }, { "api_name": "numpy.unique", "line_number": 30, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 44, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 63, "usage_type": "call" }, { "api_name": "numpy.empty", "line_number": 65, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 83, "usage_type": "call" }, { "api_name": "numpy.empty", "line_number": 85, "usage_type": "call" }, { "api_name": "numpy.int64", "line_number": 125, "usage_type": "call" }, { "api_name": "numpy.min", "line_number": 126, "usage_type": "call" }, { "api_name": "numpy.max", "line_number": 127, "usage_type": "call" }, { "api_name": "numpy.frompyfunc", "line_number": 130, "usage_type": "call" }, { "api_name": "numpy.empty", "line_number": 130, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 188, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 189, "usage_type": "call" }, { "api_name": "numpy.around", "line_number": 190, "usage_type": "call" }, { "api_name": "numpy.around", "line_number": 191, "usage_type": "call" }, { "api_name": "numpy.around", "line_number": 192, "usage_type": "call" }, { "api_name": "numpy.concatenate", "line_number": 209, "usage_type": "call" }, { "api_name": "plotly.subplots.make_subplots", "line_number": 223, "usage_type": "call" }, { "api_name": "numpy.histogram", "line_number": 251, "usage_type": "call" }, { "api_name": "numpy.logspace", "line_number": 251, "usage_type": "call" }, { "api_name": "numpy.log10", "line_number": 251, "usage_type": "call" }, { "api_name": "numpy.min", "line_number": 251, "usage_type": "call" }, { "api_name": "numpy.log10", "line_number": 252, "usage_type": "call" }, { "api_name": "numpy.max", "line_number": 252, "usage_type": "call" }, { "api_name": "numpy.histogram", "line_number": 255, "usage_type": "call" }, { "api_name": "plotly.graph_objects.Scatter", "line_number": 257, "usage_type": "call" }, { "api_name": "plotly.graph_objects", "line_number": 257, "usage_type": "name" }, { "api_name": "plotly.subplots.make_subplots", "line_number": 273, "usage_type": "call" }, { "api_name": "numpy.histogram", "line_number": 301, "usage_type": "call" }, { "api_name": "numpy.logspace", "line_number": 301, "usage_type": "call" }, { "api_name": "numpy.log10", "line_number": 301, "usage_type": "call" }, { "api_name": "numpy.histogram", "line_number": 305, "usage_type": "call" }, { "api_name": "plotly.graph_objects.Histogram", "line_number": 307, "usage_type": "call" }, { "api_name": "plotly.graph_objects", "line_number": 307, "usage_type": "name" }, { "api_name": "plotly.subplots.make_subplots", "line_number": 313, "usage_type": "call" }, { "api_name": "numpy.histogram", "line_number": 352, "usage_type": "call" }, { "api_name": "numpy.logspace", "line_number": 352, "usage_type": "call" }, { "api_name": "numpy.log10", "line_number": 352, "usage_type": "call" }, { "api_name": "numpy.min", "line_number": 352, "usage_type": "call" }, { "api_name": "numpy.log10", "line_number": 353, "usage_type": "call" }, { "api_name": "numpy.max", "line_number": 353, "usage_type": "call" }, { "api_name": "numpy.histogram", "line_number": 356, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 358, "usage_type": "call" }, { "api_name": "numpy.where", "line_number": 359, "usage_type": "call" }, { "api_name": "plotly.graph_objects.Scatter", "line_number": 365, "usage_type": "call" }, { "api_name": 
"plotly.graph_objects", "line_number": 365, "usage_type": "name" }, { "api_name": "plotly.graph_objects.Scatter", "line_number": 369, "usage_type": "call" }, { "api_name": "plotly.graph_objects", "line_number": 369, "usage_type": "name" }, { "api_name": "plotly.graph_objects.Scatter", "line_number": 374, "usage_type": "call" }, { "api_name": "plotly.graph_objects", "line_number": 374, "usage_type": "name" }, { "api_name": "plotly.graph_objects.Scatter", "line_number": 377, "usage_type": "call" }, { "api_name": "plotly.graph_objects", "line_number": 377, "usage_type": "name" } ]
375335507
#!/usr/bin/env python3
#
# Cyrius: CYP2D6 genotyper
# Copyright (c) 2019-2020 Illumina, Inc.
#
# Author: Xiao Chen <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.


from collections import namedtuple
import pysam
from .utilities import open_alignment_file

COMPLEMENT = {"A": "T", "T": "A", "C": "G", "G": "C", "N": "N"}
SITES_STRINGENT = []  # consider being more stringent for exon8 site for SMN


def reverse_complement(sequence):
    """Return the reverse complement of a sequence."""
    return "".join(COMPLEMENT[b] for b in sequence[::-1])


def get_nm(ltag):
    """Return the value of the NM tag."""
    for tag in ltag:
        if tag[0] == "NM":
            return tag[1]
    return None


def get_snp_position(pos_file):
    """Get all base differences listed in the SNP location file."""
    dsnp1 = {}
    dsnp2 = {}
    dindex = {}
    with open(pos_file) as read_pos:
        counter = -1
        for line in read_pos:
            if line[0] != "#" and line[0] != "\n":
                counter += 1
                split_line = line.strip().split()
                reg1_name = split_line[1] + "_" + str(counter)
                reg2_name = split_line[3] + "_" + str(counter)
                reg1_base = split_line[2].upper()
                reg2_base = split_line[4].upper()
                if split_line[-1] != "-":
                    dsnp1.setdefault(reg1_name, "_".join([reg1_base, reg2_base]))
                    dsnp2.setdefault(reg2_name, "_".join([reg1_base, reg2_base]))
                else:
                    dsnp1.setdefault(
                        reg1_name, "_".join([reg1_base, reverse_complement(reg2_base)])
                    )
                    dsnp2.setdefault(
                        reg2_name, "_".join([reverse_complement(reg1_base), reg2_base])
                    )
                dindex.setdefault(reg1_name, counter)
                dindex.setdefault(reg2_name, counter)
                nchr = split_line[0]
    snp_lookup = namedtuple("snp_lookup", "dsnp1 dsnp2 nchr dindex")
    dbsnp = snp_lookup(dsnp1, dsnp2, nchr, dindex)
    return dbsnp


def passing_read(pileupread):
    """Return whether a read passes filter."""
    return (
        not pileupread.is_del
        and not pileupread.is_refskip
        and pileupread.alignment.is_secondary == 0
        and pileupread.alignment.is_supplementary == 0
        and pileupread.alignment.is_duplicate == 0
    )


def passing_read_stringent(pileupread):
    """Return whether a read passes more stringent filter."""
    number_mismatch = get_nm(pileupread.alignment.tags)
    align_len = pileupread.alignment.query_alignment_length
    read_len = len(pileupread.alignment.query_sequence)
    return (
        number_mismatch <= float(align_len) * 0.08
        and pileupread.query_position > 0
        and pileupread.query_position < read_len - 1
    )


def get_reads_by_region(bamfile_handle, nchr, dsnp, dindex, min_mapq=0):
    """
    Return the number of reads supporting region1 and region2.
    """
    lsnp1 = []
    lsnp2 = []
    for _ in dsnp:
        lsnp1.append(set())
        lsnp2.append(set())
    for snp_position_ori in dsnp:
        snp_position = int(snp_position_ori.split("_")[0])
        for pileupcolumn in bamfile_handle.pileup(
            nchr,
            snp_position - 1,
            snp_position + 1,
            truncate=True,
            stepper="nofilter",
            ignore_overlaps=False,
            ignore_orphan=False,
        ):
            site_position = pileupcolumn.pos + 1
            if site_position == snp_position:
                reg1_allele, reg2_allele = dsnp[snp_position_ori].split("_")
                for read in pileupcolumn.pileups:
                    if (
                        passing_read(read)
                        and read.alignment.mapping_quality >= min_mapq
                    ):
                        dsnp_index = dindex[snp_position_ori]
                        read_name = read.alignment.query_name
                        read_seq = read.alignment.query_sequence
                        if (
                            site_position not in SITES_STRINGENT
                            or passing_read_stringent(read)
                        ):
                            reg1_allele_split = reg1_allele.split(",")
                            reg2_allele_split = reg2_allele.split(",")
                            start_pos = read.query_position
                            for allele in reg1_allele_split:
                                end_pos = start_pos + len(allele)
                                if read_seq[start_pos:end_pos] == allele:
                                    lsnp1[dsnp_index].add(read_name)
                            for allele in reg2_allele_split:
                                end_pos = start_pos + len(allele)
                                if read_seq[start_pos:end_pos] == allele:
                                    lsnp2[dsnp_index].add(read_name)
    return [len(a) for a in lsnp1], [len(a) for a in lsnp2]


def get_fraction(lsnp1, lsnp2):
    """Return the fraction of reads supporting region1."""
    reg1_fraction = []
    for index in range(len(lsnp1)):
        sumdepth = lsnp1[index] + lsnp2[index]
        if sumdepth == 0:
            reg1_fraction.append(0)
        else:
            reg1_fraction.append(float(lsnp1[index]) / float(sumdepth))
    return reg1_fraction


def get_supporting_reads(bamf, dsnp1, dsnp2, nchr, dindex, reference=None):
    """
    Return the number of supporting reads at each position in both region1 and region2.
    """
    bamfile_handle = open_alignment_file(bamf, reference)
    assert len(dsnp1) == len(dsnp2)
    # Go through SNP sites in both regions,
    # and count the number of reads supporting each gene.
    lsnp1_reg1, lsnp2_reg1 = get_reads_by_region(bamfile_handle, nchr, dsnp1, dindex)
    lsnp1_reg2, lsnp2_reg2 = get_reads_by_region(bamfile_handle, nchr, dsnp2, dindex)
    lsnp1 = [sum(x) for x in zip(lsnp1_reg1, lsnp1_reg2)]
    lsnp2 = [sum(x) for x in zip(lsnp2_reg1, lsnp2_reg2)]
    bamfile_handle.close()
    return lsnp1, lsnp2


def get_supporting_reads_single_region(bamf, dsnp1, nchr, dindex, reference=None):
    """
    Return the number of supporting reads at each position only in region1.
    """
    bamfile_handle = open_alignment_file(bamf, reference)
    lsnp1, lsnp2 = get_reads_by_region(bamfile_handle, nchr, dsnp1, dindex, 10)
    bamfile_handle.close()
    return lsnp1, lsnp2
null
depth_calling/snp_count.py
snp_count.py
py
7,095
python
en
code
null
code-starcoder2
83
[ { "api_name": "collections.namedtuple", "line_number": 73, "usage_type": "call" }, { "api_name": "utilities.open_alignment_file", "line_number": 168, "usage_type": "call" }, { "api_name": "utilities.open_alignment_file", "line_number": 184, "usage_type": "call" } ]
482229088
# uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: /home/gyst/plonesocial.buildout/src/plonesocial.microblog/plonesocial/microblog/utils.py
# Compiled at: 2014-01-17 08:56:06
# NB: this module was decompiled from Python 2.7 bytecode; `long` below is Python 2.
import time

from BTrees import LLBTree

from .interfaces import IMicroblogContext


def get_microblog_context(context):
    if context is None:
        return None
    if IMicroblogContext.providedBy(context):
        return context
    try:
        chain = context.aq_inner.aq_chain
    except AttributeError:
        return None
    for item in chain:
        if IMicroblogContext.providedBy(item):
            return item
    return None


def longkeysortreverse(btreeish, minv=None, maxv=None, limit=None):
    """Performance optimized keyspace accessor.
    Returns an iterable of btreeish keys, reverse sorted by key.
    Expects a btreeish with long(microsec) keys.
    """
    try:
        accessor = btreeish.keys
    except AttributeError:
        accessor = LLBTree.TreeSet(btreeish).keys

    i = 0
    if minv or maxv:
        keys = [x for x in accessor(min=minv, max=maxv)]
        keys.sort()
        keys.reverse()
        for key in keys:
            yield key
            i += 1
            if i == limit:
                return
    else:
        # Scan the keyspace in three windows -- the last hour, the rest of
        # the last day, and everything older -- newest first within each.
        tmax = long(time.time() * 1000000.0)
        tmin = long(tmax - 3600000000.0)  # one hour, in microseconds
        keys = [x for x in accessor(min=tmin, max=tmax)]
        keys.sort()
        keys.reverse()
        for key in keys:
            yield key
            i += 1
            if i == limit:
                return

        tmax = tmin
        tmin = long(tmax - 82800000000.0)  # the remaining 23 hours
        keys = [x for x in accessor(min=tmin, max=tmax)]
        keys.sort()
        keys.reverse()
        for key in keys:
            yield key
            i += 1
            if i == limit:
                return

        tmax = tmin
        keys = [x for x in accessor(max=tmax)]
        keys.sort()
        keys.reverse()
        for key in keys:
            yield key
            i += 1
            if i == limit:
                return
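# --- Usage sketch (editor's addition, not in the original file) ---
# Python 2 sketch of longkeysortreverse over an LLBTree keyed by microsecond
# timestamps. The small keys below are arbitrary and all fall in the
# "older than a day" window, so the third scan yields them newest-first.
#
#   from BTrees import LLBTree
#   tree = LLBTree.LLBTree()
#   for k in (10L, 30L, 20L):
#       tree[k] = None
#   print list(longkeysortreverse(tree, limit=2))  # [30L, 20L]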
null
pycfiles/plonesocial.microblog-0.5.3/utils.py
utils.py
py
2,225
python
en
code
null
code-starcoder2
83
[ { "api_name": "interfaces.IMicroblogContext.providedBy", "line_number": 15, "usage_type": "call" }, { "api_name": "interfaces.IMicroblogContext", "line_number": 15, "usage_type": "name" }, { "api_name": "interfaces.IMicroblogContext.providedBy", "line_number": 23, "usage_type": "call" }, { "api_name": "interfaces.IMicroblogContext", "line_number": 23, "usage_type": "name" }, { "api_name": "BTrees.LLBTree.TreeSet", "line_number": 39, "usage_type": "call" }, { "api_name": "BTrees.LLBTree", "line_number": 39, "usage_type": "name" }, { "api_name": "time.time", "line_number": 53, "usage_type": "call" } ]
446145063
import time
from threading import Thread
import json

import requests
from pprint import pprint


def parse(json1):
    data = json1['data']
    # data_id
    return data


def gogo(page_idx, results):
    url = 'https://data.mafra.go.kr/opendata/data/open/getDataListPage.do'
    data = requests.post(url, data={
        'cur_page': page_idx,
        'rows': 10
    })
    res = parse(data.json())
    results[page_idx] = res
    print('thread {} finished'.format(page_idx))


total_pages = 107 + 1  # indices start at 0, but pages start at 1

# create the shared containers at global scope
threads = [None] * total_pages
results = [None] * total_pages

start = time.time()

for i in range(1, total_pages):
    threads[i] = Thread(target=gogo, args=(i, results))
    threads[i].start()
    print('thread {} started'.format(i))

for i in range(1, total_pages):
    threads[i].join()

# time.sleep(total_pages / 3)
'''
for i in range(1, total_pages):
    gogo(i, results)
'''

cnt = 0
result_total = []
for result in results:
    cnt += 1
    # print(cnt, end='')
    if result is not None:
        # print(len(result), result)
        result_total += result

print(len(result_total[1]))
print(time.time() - start)

with open("test.json", "w", encoding="utf-8") as make_file:
    json.dump(results, make_file, ensure_ascii=False, indent="\t")

# json_save(result_total, 'mafra_total_data.json')
null
ver.4/1_multithread_example_code.py
1_multithread_example_code.py
py
1,368
python
en
code
null
code-starcoder2
83
[ { "api_name": "requests.post", "line_number": 14, "usage_type": "call" }, { "api_name": "time.time", "line_number": 28, "usage_type": "call" }, { "api_name": "threading.Thread", "line_number": 31, "usage_type": "call" }, { "api_name": "time.time", "line_number": 51, "usage_type": "call" }, { "api_name": "json.dump", "line_number": 54, "usage_type": "call" } ]
308262569
from argparse import ArgumentParser

import test_method1 as TM1

if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--addr', type=str, default='http://lando.sytes.net',
                        help='Server domain name/ip address')
    parser.add_argument('--port', type=int, default=8888, help='Server port number')
    parser.add_argument('--end_point', type=str, default='status', help='API Endpoint')
    parser.add_argument('--verbose', action='store_true', help='Print verbosity')
    parser.add_argument('--conc', type=int, default=1, help='Number of concurrent requests')
    parser.add_argument('--req_num', type=int, default=1, help='Number of requests')
    parser.add_argument('--test_mode', type=str, default='STATUS', choices=['STATUS', 'FILE'],
                        help='Select the mode of testing')
    args = parser.parse_args()

    if args.test_mode == 'STATUS':
        TM1.test1("%s:%s/%s" % (args.addr, args.port, args.end_point), args.verbose,
                  args.conc, args.req_num)
    elif args.test_mode == 'FILE':
        TM1.test2("%s:%s/test/file" % (args.addr, args.port), args.verbose,
                  args.conc, args.req_num)
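# --- Usage sketch (editor's addition, not in the original file) ---
# Example invocations built from the flags defined above; the host and port
# are illustrative:
#
#   python server-benchmark.py --addr http://localhost --port 8888 \
#       --test_mode STATUS --conc 4 --req_num 100 --verbose
#   python server-benchmark.py --test_mode FILE --req_num 10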
null
client/server-benchmark.py
server-benchmark.py
py
1,166
python
en
code
null
code-starcoder2
83
[ { "api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call" }, { "api_name": "test_method1.test1", "line_number": 18, "usage_type": "call" }, { "api_name": "test_method1.test2", "line_number": 21, "usage_type": "call" } ]
385399490
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import logging
import argparse
import time


def writeTofile(text, filename="novel.txt"):
    with open(filename, 'a') as f:
        f.write(text)
        f.write("\n\n")


def log(logger):
    logger.setLevel(logging.DEBUG)
    fh = logging.StreamHandler()
    fh.setLevel(logging.DEBUG)
    fmt = "%(asctime)s %(lineno)d %(message)s"
    datefmt = "%H:%M:%s"
    formatter = logging.Formatter(fmt, datefmt)
    fh.setFormatter(formatter)
    logger.addHandler(fh)


def nextPage(driver, next_id, end_id, content_id, num):
    driver.set_window_size(1280, 800)
    logger.debug(num)
    chapter = "Chapter"
    content = chapter + str(num) + "\n" + driver.find_element_by_id(content_id).text
    writeTofile(content)
    time.sleep(2)
    next_element_div = driver.find_element_by_id(next_id)
    next_element = next_element_div.find_elements_by_xpath("a")[-1]
    href = str(next_element.get_attribute("href"))
    if end_id in href:
        return
    else:
        next_element.click()
        num += 1
        nextPage(driver, next_id, end_id, content_id, num)


def main():
    chrome_option = Options()
    chrome_option.add_argument("--headless")
    driver = webdriver.Chrome(chrome_options=chrome_option)
    driver.get("http://www.wutuxs.com/html/3/3740/2576260.html")
    content_id = "contents"
    next_id = "footlink"
    end_id = "index.html"
    nextPage(driver, next_id, end_id, content_id, 1)


logger_name = "novel"
logger = logging.getLogger(logger_name)
log(logger)
logger.debug("test")
main()
null
novel/panlong.py
panlong.py
py
1,606
python
en
code
null
code-starcoder2
83
[ { "api_name": "logging.DEBUG", "line_number": 15, "usage_type": "attribute" }, { "api_name": "logging.StreamHandler", "line_number": 16, "usage_type": "call" }, { "api_name": "logging.DEBUG", "line_number": 17, "usage_type": "attribute" }, { "api_name": "logging.Formatter", "line_number": 20, "usage_type": "call" }, { "api_name": "time.sleep", "line_number": 32, "usage_type": "call" }, { "api_name": "selenium.webdriver.chrome.options.Options", "line_number": 49, "usage_type": "call" }, { "api_name": "selenium.webdriver.Chrome", "line_number": 52, "usage_type": "call" }, { "api_name": "selenium.webdriver", "line_number": 52, "usage_type": "name" }, { "api_name": "logging.getLogger", "line_number": 64, "usage_type": "call" } ]
291315932
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#
# Copyleft (c) 2009, Grudejo:
#   Aline Grazielle Silva Reis
#   Julia Carmona Almeida Chaves
#   Luziany Maria de Oliveira
#   Joyce Karoline Dare
#   Prof. Douglas Machado Tavares
#

import pygame
from pygame.constants import *


class Ator(pygame.sprite.Sprite):
    """ Ator (actor) class """

    def __init__(self, pos_x=0, pos_y=0):
        """ Constructor: __init__() -> actor instance """
        pygame.sprite.Sprite.__init__(self)
        self.poses = []
        self.__pt_pose = 0  # pointer to the current pose.
        self.__pos_x = pos_x
        self.__pos_y = pos_y

    def inserir_pose(self, nome_arq_img):
        """ Stores a 'surface' in the poses list """
        self.poses.append(pygame.image.load(nome_arq_img))
        self.image = self.poses[self.__pt_pose]
        self.rect = self.image.get_rect()
        self.rect.x = self.__pos_x
        self.rect.y = self.__pos_y

    def update(self):
        """ Reimplements update() """
        self.__pt_pose = self.__pt_pose % len(self.poses)
        self.image = self.poses[self.__pt_pose]
        self.__pt_pose += 1


class Jogo:
    """ Jogo (game) class """

    def __init__(self):
        """ Constructor: __init__() -> game instance """
        pygame.init()
        self.tela = pygame.display.set_mode((800, 600))

    def criar_atores(self):
        """ Creates the actors """
        self.paola = Ator(0, 100)
        for x in range(1, 5):
            self.paola.inserir_pose("paola_ED_%02i.png" % x)
        self.grupo_atores = pygame.sprite.RenderPlain((self.paola))

    def atualizar_atores(self):
        """ Updates the actors """
        ret_tela = self.tela.get_rect()
        if (self.paola.rect.x < ret_tela.width - self.paola.rect.width):
            self.paola.rect.x += 6

    def repintar_tela(self):
        """ Repaints the screen """
        self.tela.fill((180, 180, 180))
        self.grupo_atores.update()
        self.grupo_atores.draw(self.tela)
        pygame.display.flip()

    def tratar_eventos_teclado(self, evento):
        """ Watches and handles keyboard events """
        tecla = evento.key
        if ((tecla == K_ESCAPE) or (tecla == K_q)):
            raise SystemExit

    def tratar_eventos(self):
        """ Watches and handles the events """
        for evento in pygame.event.get():
            if (evento.type == QUIT):
                raise SystemExit
            if (evento.type == KEYDOWN):
                self.tratar_eventos_teclado(evento)

    def rodar(self):
        """ Runs the game """
        self.criar_atores()
        FPS = 8
        relogio = pygame.time.Clock()
        while (True):
            self.tratar_eventos()
            self.atualizar_atores()
            self.repintar_tela()
            relogio.tick(FPS)


if (__name__ == "__main__"):
    jogo = Jogo()
    jogo.rodar()
null
src/etapa_03/jogo_v03.py
jogo_v03.py
py
2,984
python
en
code
null
code-starcoder2
83
[ { "api_name": "pygame.sprite", "line_number": 17, "usage_type": "attribute" }, { "api_name": "pygame.sprite.Sprite.__init__", "line_number": 22, "usage_type": "call" }, { "api_name": "pygame.sprite", "line_number": 22, "usage_type": "attribute" }, { "api_name": "pygame.image.load", "line_number": 31, "usage_type": "call" }, { "api_name": "pygame.image", "line_number": 31, "usage_type": "attribute" }, { "api_name": "pygame.init", "line_number": 52, "usage_type": "call" }, { "api_name": "pygame.display.set_mode", "line_number": 53, "usage_type": "call" }, { "api_name": "pygame.display", "line_number": 53, "usage_type": "attribute" }, { "api_name": "pygame.sprite.RenderPlain", "line_number": 61, "usage_type": "call" }, { "api_name": "pygame.sprite", "line_number": 61, "usage_type": "attribute" }, { "api_name": "pygame.display.flip", "line_number": 76, "usage_type": "call" }, { "api_name": "pygame.display", "line_number": 76, "usage_type": "attribute" }, { "api_name": "pygame.event.get", "line_number": 88, "usage_type": "call" }, { "api_name": "pygame.event", "line_number": 88, "usage_type": "attribute" }, { "api_name": "pygame.time.Clock", "line_number": 99, "usage_type": "call" }, { "api_name": "pygame.time", "line_number": 99, "usage_type": "attribute" } ]
141813354
import bpy

# Lengths (x, y and z) of the top step
length1 = 3
length2 = 2
length3 = 1

# Number of steps
steps = 5


def criaDegrau(length1, length2, length3, times):
    if times == 0:
        return times

    # Duplicates the floor immediately above and applies the translations needed for the next step
    bpy.ops.object.duplicate_move(OBJECT_OT_duplicate={"linked": False, "mode": "TRANSLATION"},
                                  TRANSFORM_OT_translate={"value": (0, 0, -length3)})
    bpy.ops.transform.translate(value=(length1 / 2, 0, 0))
    bpy.ops.transform.translate(value=(0, length2 / 2, 0))
    bpy.ops.object.duplicate_move(OBJECT_OT_duplicate={"linked": False, "mode": "TRANSLATION"},
                                  TRANSFORM_OT_translate={"value": (-length1, 0, 0)})
    bpy.ops.object.duplicate_move(OBJECT_OT_duplicate={"linked": False, "mode": "TRANSLATION"},
                                  TRANSFORM_OT_translate={"value": (0, -length2, 0)})
    bpy.ops.object.duplicate_move(OBJECT_OT_duplicate={"linked": False, "mode": "TRANSLATION"},
                                  TRANSFORM_OT_translate={"value": (length1, 0, 0)})

    # Joins the elements into a single MESH
    bpy.ops.object.select_by_type(type='MESH')
    bpy.ops.object.join()

    # Removes duplicated elements to avoid bloating the program
    bpy.ops.object.editmode_toggle()
    bpy.ops.mesh.select_all(action='TOGGLE')
    bpy.ops.mesh.remove_doubles()
    bpy.ops.mesh.select_all(action='TOGGLE')
    bpy.ops.object.editmode_toggle()

    # Repeats the process for the next step
    times = times - 1
    criaDegrau(length1, length2, length3, times)


# Defines the vertices and faces
verts = [(0, 0, 0), (0, length2, 0), (length1, length2, 0), (length1, 0, 0),
         (0, 0, length3), (0, length2, length3), (length1, length2, length3), (length1, 0, length3)]
faces = [(0, 1, 2, 3), (4, 5, 6, 7), (0, 4, 5, 1), (1, 5, 6, 2), (2, 6, 7, 3), (3, 7, 4, 0)]

# Defines the mesh and the object
mesh = bpy.data.meshes.new("Paralelepipedo")
object = bpy.data.objects.new("Paralelepipedo", mesh)

# Set location and scene of object
object.location = bpy.context.scene.cursor_location
bpy.context.scene.objects.link(object)

# Creates the mesh
mesh.from_pydata(verts, [], faces)
mesh.update(calc_edges=True)

# Activates the object so it can be used in the following recursions
bpy.context.scene.objects.active = object

criaDegrau(length1, length2, length3, steps)
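# --- Usage note (editor's addition, not in the original file) ---
# This script must run inside Blender, where the `bpy` module is available,
# e.g. headlessly from the command line (the path is illustrative):
#
#   blender --background --python tower-recursion.py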
null
tower-recursion.py
tower-recursion.py
py
2,278
python
en
code
null
code-starcoder2
83
[ { "api_name": "bpy.ops.object.duplicate_move", "line_number": 16, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 16, "usage_type": "attribute" }, { "api_name": "bpy.ops.transform.translate", "line_number": 17, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 17, "usage_type": "attribute" }, { "api_name": "bpy.ops.transform.translate", "line_number": 18, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 18, "usage_type": "attribute" }, { "api_name": "bpy.ops.object.duplicate_move", "line_number": 19, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 19, "usage_type": "attribute" }, { "api_name": "bpy.ops.object.duplicate_move", "line_number": 20, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 20, "usage_type": "attribute" }, { "api_name": "bpy.ops.object.duplicate_move", "line_number": 21, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 21, "usage_type": "attribute" }, { "api_name": "bpy.ops.object.select_by_type", "line_number": 24, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 24, "usage_type": "attribute" }, { "api_name": "bpy.ops.object.join", "line_number": 25, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 25, "usage_type": "attribute" }, { "api_name": "bpy.ops.object.editmode_toggle", "line_number": 28, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 28, "usage_type": "attribute" }, { "api_name": "bpy.ops.mesh.select_all", "line_number": 29, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 29, "usage_type": "attribute" }, { "api_name": "bpy.ops.mesh.remove_doubles", "line_number": 30, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 30, "usage_type": "attribute" }, { "api_name": "bpy.ops.mesh.select_all", "line_number": 31, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 31, "usage_type": "attribute" }, { "api_name": "bpy.ops.object.editmode_toggle", "line_number": 32, "usage_type": "call" }, { "api_name": "bpy.ops", "line_number": 32, "usage_type": "attribute" }, { "api_name": "bpy.data.meshes.new", "line_number": 45, "usage_type": "call" }, { "api_name": "bpy.data", "line_number": 45, "usage_type": "attribute" }, { "api_name": "bpy.data.objects.new", "line_number": 46, "usage_type": "call" }, { "api_name": "bpy.data", "line_number": 46, "usage_type": "attribute" }, { "api_name": "bpy.context", "line_number": 49, "usage_type": "attribute" }, { "api_name": "bpy.context.scene.objects.link", "line_number": 50, "usage_type": "call" }, { "api_name": "bpy.context", "line_number": 50, "usage_type": "attribute" }, { "api_name": "bpy.context", "line_number": 57, "usage_type": "attribute" } ]
186141521
import argparse as arg
import os

parser = arg.ArgumentParser()
parser.add_argument("--way", type=str, help="Name/path of the file to sort", default=None)
args = parser.parse_args()


def MergeSort(text: list) -> None:
    """Sorts the list in place with a recursive merge sort."""
    if len(text) > 1:
        half = len(text) // 2
        left_part = text[:half]
        right_part = text[half:]

        MergeSort(left_part)
        MergeSort(right_part)

        # Merge the two sorted halves back into `text`.
        i = j = k = 0
        while i < len(left_part) and j < len(right_part):
            if left_part[i] <= right_part[j]:
                text[k] = left_part[i]
                i += 1
            else:
                text[k] = right_part[j]
                j += 1
            k += 1
        while i < len(left_part):
            text[k] = left_part[i]
            i += 1
            k += 1
        while j < len(right_part):
            text[k] = right_part[j]
            j += 1
            k += 1


if not args.way:
    print("Enter the path to the file")
    way = input()
else:
    way = args.way

# Write the sorted output next to the input file.
new_way = os.path.join(os.path.dirname(way), "out_file")

with open(way, 'r', encoding="utf-8") as file, \
        open(new_way, 'w', encoding="utf-8") as new_file:
    for line in file:
        temp_line = line.split(' ')
        MergeSort(temp_line)
        new_file.write(' '.join(temp_line) + '\n')
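# --- Usage sketch (editor's addition, not in the original file) ---
# MergeSort sorts in place; tokens are compared as strings, which is also
# how the file lines above are handled. Kept as comments so running the
# script is not affected:
#
#   words = ["pear", "apple", "fig"]
#   MergeSort(words)
#   print(words)  # ['apple', 'fig', 'pear']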
null
lab_2/17_lab_2_3.py
17_lab_2_3.py
py
999
python
en
code
null
code-starcoder2
83
[ { "api_name": "argparse.ArgumentParser", "line_number": 2, "usage_type": "call" } ]
397896335
# Copyright 2019 Regents of the University of Minnesota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pathlib import Path
from tempfile import TemporaryFile

from mtap.io.serialization import JsonSerializer

import mtap
from mtap import GenericLabel
from mtap.events import Event, Document


def test_json_serializer():
    event = Event(event_id='1')
    event.metadata['foo'] = "bar"
    document = Document('plaintext', text='Some text.')
    event.add_document(document)
    document.add_labels('one',
                        [mtap.GenericLabel(start_index=0, end_index=5, x=10),
                         mtap.GenericLabel(start_index=6, end_index=10, x=15)])
    document.add_labels('two',
                        [mtap.GenericLabel(start_index=0, end_index=25, a='b'),
                         mtap.GenericLabel(start_index=26, end_index=42, a='c')])
    document.add_labels('three', [
        mtap.GenericLabel(start_index=0, end_index=10, foo=True),
        mtap.GenericLabel(start_index=11, end_index=15, foo=False)
    ], distinct=True)

    with TemporaryFile('w+') as tf:
        JsonSerializer.event_to_file(event, tf)
        tf.flush()
        tf.seek(0)
        o = json.load(tf)

    assert o['event_id'] == '1'
    assert o['metadata']['foo'] == 'bar'
    d = o['documents']['plaintext']
    assert d['text'] == 'Some text.'
    assert len(d['label_indices']) == 3
    assert d['label_indices']['one'] == {
        'json_labels': [
            {
                'start_index': 0,
                'end_index': 5,
                'x': 10
            },
            {
                'start_index': 6,
                'end_index': 10,
                'x': 15
            }
        ],
        'distinct': False
    }
    assert d['label_indices']['two'] == {
        'json_labels': [
            {
                'start_index': 0,
                'end_index': 25,
                'a': 'b'
            },
            {
                'start_index': 26,
                'end_index': 42,
                'a': 'c'
            }
        ],
        'distinct': False
    }
    assert d['label_indices']['three'] == {
        'json_labels': [
            {
                'start_index': 0,
                'end_index': 10,
                'foo': True
            },
            {
                'start_index': 11,
                'end_index': 15,
                'foo': False
            }
        ],
        'distinct': True
    }


def test_deserialization():
    f = Path(__file__).parent / 'event.json'
    event = JsonSerializer.file_to_event(f)
    assert event.event_id == '12345'
    assert event.metadata['foo'] == 'bar'
    d = event.documents['plaintext']
    assert d.text == "The quick brown fox jumps over the lazy dog."
    assert len(d.get_label_indices_info()) == 3
    assert d.get_label_index("one") == [
        GenericLabel(start_index=0, end_index=10, a="b"),
        GenericLabel(start_index=12, end_index=25, a="c"),
        GenericLabel(start_index=26, end_index=52, a="d"),
        GenericLabel(start_index=53, end_index=85, a="e"),
    ]
    assert d.get_label_index("two") == [
        GenericLabel(start_index=0, end_index=10, x=1),
        GenericLabel(start_index=3, end_index=9, x=3),
        GenericLabel(start_index=4, end_index=25, x=2),
        GenericLabel(start_index=5, end_index=25, x=4),
    ]
    assert d.get_label_index("three") == [
        GenericLabel(start_index=0, end_index=10, x=True),
        GenericLabel(start_index=3, end_index=9, x=True),
        GenericLabel(start_index=4, end_index=25, x=False),
        GenericLabel(start_index=5, end_index=25, x=False),
    ]
null
python/tests/io/test_json.py
test_json.py
py
4,116
python
en
code
null
code-starcoder2
83
[ { "api_name": "mtap.events.Event", "line_number": 26, "usage_type": "call" }, { "api_name": "mtap.events.Document", "line_number": 28, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 30, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 31, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 32, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 33, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 35, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 36, "usage_type": "call" }, { "api_name": "tempfile.TemporaryFile", "line_number": 39, "usage_type": "call" }, { "api_name": "mtap.io.serialization.JsonSerializer.event_to_file", "line_number": 40, "usage_type": "call" }, { "api_name": "mtap.io.serialization.JsonSerializer", "line_number": 40, "usage_type": "name" }, { "api_name": "json.load", "line_number": 43, "usage_type": "call" }, { "api_name": "pathlib.Path", "line_number": 98, "usage_type": "call" }, { "api_name": "mtap.io.serialization.JsonSerializer.file_to_event", "line_number": 99, "usage_type": "call" }, { "api_name": "mtap.io.serialization.JsonSerializer", "line_number": 99, "usage_type": "name" }, { "api_name": "mtap.GenericLabel", "line_number": 106, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 107, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 108, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 109, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 112, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 113, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 114, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 115, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 118, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 119, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 120, "usage_type": "call" }, { "api_name": "mtap.GenericLabel", "line_number": 121, "usage_type": "call" } ]
464394400
from mpl_toolkits.basemap import Basemap, cm, shiftgrid, interp
from netCDF4 import Dataset as NetCDFFile
import numpy as N
import matplotlib.pyplot as plt
import glob
import numpy.ma as ma
from scipy.interpolate import griddata
import scipy.stats
from matplotlib.dates import DateFormatter
import datetime

country = NetCDFFile('/scratch2/scratchdirs/tslin2/plot/globalcrop/data/Ctry_halfdeg.nc', 'r')
# print iizumi
coun = country.variables['MASK_Country'][:, :]

region1 = NetCDFFile('/scratch2/scratchdirs/tslin2/plot/globalcrop/data/clm/HistoricalGLM_crop_150901.nc', 'r')
maitrop = region1.variables['maize_trop'][:, :, :]
maitemp = region1.variables['maize_temp'][:, :, :]
maitropi = region1.variables['maize_trop_irrig'][:, :, :]
maitempi = region1.variables['maize_temp_irrig'][:, :, :]
gridarea = region1.variables['area'][:, :]

maitrop = ma.masked_where(maitrop <= 0, maitrop)
maitrop = ma.filled(maitrop, fill_value=0.)
maitemp = ma.masked_where(maitemp <= 0, maitemp)
maitemp = ma.filled(maitemp, fill_value=0.)

maitropi = ma.masked_where(maitropi <= 0, maitropi)
maitropi = ma.filled(maitropi, fill_value=0.)
maitempi = ma.masked_where(maitempi <= 0, maitempi)
maitempi = ma.filled(maitempi, fill_value=0.)

maizetor = maitrop + maitemp
maizetoi = maitropi + maitempi
maizeto = maitrop + maitemp + maitropi + maitempi

clm3n2 = NetCDFFile('/scratch2/scratchdirs/tslin2/isam/cheyenne/plot/finalyield/isam/heat/fertfao/new1/isamhiscru_maiscaleiyield_fertfao_new1.nc', 'r')
isamcrumai2 = clm3n2.variables['yield'][:, :, :]
isamcrumai2 = ma.masked_where(isamcrumai2 <= 0, isamcrumai2)
isamcrumai2 = ma.filled(isamcrumai2, fill_value=0.)
isamcrumai2[N.isnan(isamcrumai2)] = 0
# isamcrumai2 = ma.masked_where(isamcrumai2 <= 0, isamcrumai2)
isamcrumai2 = ma.filled(isamcrumai2, fill_value=0.)

yynew = N.zeros((105, 360))

for t1 in range(0, 105):
    for xx in range(0, 360):
        harea = 0
        yieldic2 = 0
        for yy in range(0, 720):
            if maizeto[t1, xx, yy] > 0.0:
                harea = maizeto[t1, xx, yy] * gridarea[xx, yy] + harea
                yieldic2 = (isamcrumai2[t1, xx, yy] * maizeto[t1, xx, yy] * gridarea[xx, yy]) + yieldic2
        if harea > 0.0:
            yynew[t1, xx] = yieldic2 / harea

yynew[N.isnan(yynew)] = -1
yynew = ma.masked_where(yynew <= 0, yynew)
# print yynew[99,:].shape
# print yynew[99,:]

fig = plt.figure(figsize=(15, 6))
tt = N.arange(-89.75, 90., 0.5)
ax = fig.add_subplot(111)
plt.plot(tt, N.average(yynew[90:100, :], axis=0), 'r-')
plt.plot(tt, N.average(yynew[60:70, :], axis=0), 'k-')
plt.plot(tt, N.average(yynew[10:20, :], axis=0), 'b-')

leg = plt.legend(['1991-2000', '1961-1970', '1911-1920'], fontsize=18)
leg.get_frame().set_alpha(0.5)
# plt.xticks(N.arange(tt.min(), tt.max(), 30))
plt.axis([-90, 90, 0, 15])
ax.set_xticks((-75, -60, -45, -30, -15, 0, 15, 30, 45, 60, 75))
my_xticks = ['-75 S', '-60 S', '-45 S', '-30 S', '-15 S', 'EQ', '15 N', '30 N', '45 N', '60 N', '75 N']
# plt.xticks(x, my_xticks)
ax.set_xticklabels(my_xticks, fontsize=18)
plt.xlabel("Latitude ", fontsize=18)
plt.ylabel("Maize yields (t/ha)", fontsize=18)
plt.tick_params(axis='both', labelsize=18)

plt.savefig('maize_1961_2016_lat.png', dpi=600, bbox_inches='tight')
plt.show()
null
plot/detrend/mai_ann_globe_p3.py
mai_ann_globe_p3.py
py
3,013
python
en
code
null
code-starcoder2
83
[ { "api_name": "netCDF4.Dataset", "line_number": 13, "usage_type": "call" }, { "api_name": "netCDF4.Dataset", "line_number": 18, "usage_type": "call" }, { "api_name": "numpy.ma.masked_where", "line_number": 24, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 24, "usage_type": "name" }, { "api_name": "numpy.ma.filled", "line_number": 25, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 25, "usage_type": "name" }, { "api_name": "numpy.ma.masked_where", "line_number": 26, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 26, "usage_type": "name" }, { "api_name": "numpy.ma.filled", "line_number": 27, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 27, "usage_type": "name" }, { "api_name": "numpy.ma.masked_where", "line_number": 29, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 29, "usage_type": "name" }, { "api_name": "numpy.ma.filled", "line_number": 30, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 30, "usage_type": "name" }, { "api_name": "numpy.ma.masked_where", "line_number": 31, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 31, "usage_type": "name" }, { "api_name": "numpy.ma.filled", "line_number": 32, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 32, "usage_type": "name" }, { "api_name": "netCDF4.Dataset", "line_number": 38, "usage_type": "call" }, { "api_name": "numpy.ma.masked_where", "line_number": 40, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 40, "usage_type": "name" }, { "api_name": "numpy.ma.filled", "line_number": 41, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 41, "usage_type": "name" }, { "api_name": "numpy.isnan", "line_number": 42, "usage_type": "call" }, { "api_name": "numpy.ma.filled", "line_number": 44, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 44, "usage_type": "name" }, { "api_name": "numpy.zeros", "line_number": 46, "usage_type": "call" }, { "api_name": "numpy.isnan", "line_number": 59, "usage_type": "call" }, { "api_name": "numpy.ma.masked_where", "line_number": 60, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 60, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.figure", "line_number": 66, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name" }, { "api_name": "numpy.arange", "line_number": 68, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.plot", "line_number": 70, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name" }, { "api_name": "numpy.average", "line_number": 70, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.plot", "line_number": 71, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name" }, { "api_name": "numpy.average", "line_number": 71, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.plot", "line_number": 72, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name" }, { "api_name": "numpy.average", "line_number": 72, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.legend", "line_number": 74, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.axis", "line_number": 77, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.xlabel", "line_number": 83, 
"usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.ylabel", "line_number": 84, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.tick_params", "line_number": 85, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.savefig", "line_number": 88, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.show", "line_number": 89, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name" } ]
554968815
#!/usr/bin/env python
# coding: utf-8

import argparse
import json
import os
import random

import cv2
import pandas as pd
from tqdm import tqdm


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--deciding_only", action="store_true")
    parser.add_argument("--bandwidth", type=int, default=66)
    parser.add_argument("--weight_clusters", action="store_true")
    return parser.parse_args()


def main():
    args = parse_args()
    data_dir = os.path.join(
        "correspondence_matching",
        "data",
        f"BW{args.bandwidth}_DecidingOnly{args.deciding_only}",
    )
    weighted_str = "weighted" if args.weight_clusters else "nonweighted"
    for outcome in ["FP", "FN", "TP", "TN", "Inc"]:
        os.makedirs(os.path.join(data_dir, weighted_str, outcome))

    with open(os.path.join(data_dir, weighted_str, "matched_cluster_data.json"), "r") as f:
        matched_cluster_data = json.load(f)

    trial_stats = pd.read_csv(
        os.path.join("data", "CwCeTrialStats_20200324.csv"),
        usecols=["ImagePair", "Examiner", "Outcome"],
    )

    for image_pair in tqdm(matched_cluster_data):
        pair_data = matched_cluster_data[image_pair]
        for examiner in pair_data:
            trial_data = pair_data[examiner]

            # Pull outcome from trial_stats
            outcome = trial_stats[
                (trial_stats["ImagePair"] == image_pair)
                & (trial_stats["Examiner"] == examiner)
            ]["Outcome"]
            assert outcome.shape[0] == 1
            outcome = outcome.iloc[0]

            left_img = cv2.imread(os.path.join("WBeyeDataset105", f"{image_pair}_Left.png"))
            right_img = cv2.imread(os.path.join("WBeyeDataset105", f"{image_pair}_Right.png"))
            images = {"Left": left_img, "Right": right_img}

            assert len(trial_data["Left"]) == len(trial_data["Right"])
            num_points = len(trial_data["Left"])
            colors = dict()
            for i in range(num_points):
                colors[i] = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))

            for side in images:
                for i in range(num_points):
                    coord = trial_data[side][i]
                    x = int(coord[0])
                    y = int(coord[1])
                    cv2.circle(
                        img=images[side],
                        center=(x, y),
                        radius=10,
                        color=colors[i],
                        thickness=-1,
                    )
                cv2.imwrite(
                    os.path.join(
                        data_dir, weighted_str, outcome, f"{image_pair}_{examiner}_{side}.jpg"
                    ),
                    images[side],
                )


if __name__ == "__main__":
    main()
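# A hypothetical invocation, using only the flags defined in parse_args()
# above (the data-directory layout is assumed from the os.path.join calls,
# not verified against the original repo):
#   python draw_matchings.py --bandwidth 66 --weight_clusters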
null
python/correspondence_matching/draw_matchings.py
draw_matchings.py
py
2,810
python
en
code
null
code-starcoder2
83
[ { "api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 26, "usage_type": "call" }, { "api_name": "os.path", "line_number": 26, "usage_type": "attribute" }, { "api_name": "os.makedirs", "line_number": 32, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 32, "usage_type": "call" }, { "api_name": "os.path", "line_number": 32, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 34, "usage_type": "call" }, { "api_name": "os.path", "line_number": 34, "usage_type": "attribute" }, { "api_name": "json.load", "line_number": 35, "usage_type": "call" }, { "api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 38, "usage_type": "call" }, { "api_name": "os.path", "line_number": 38, "usage_type": "attribute" }, { "api_name": "tqdm.tqdm", "line_number": 42, "usage_type": "call" }, { "api_name": "cv2.imread", "line_number": 54, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 54, "usage_type": "call" }, { "api_name": "os.path", "line_number": 54, "usage_type": "attribute" }, { "api_name": "cv2.imread", "line_number": 55, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 55, "usage_type": "call" }, { "api_name": "os.path", "line_number": 55, "usage_type": "attribute" }, { "api_name": "random.randint", "line_number": 62, "usage_type": "call" }, { "api_name": "cv2.circle", "line_number": 69, "usage_type": "call" }, { "api_name": "cv2.imwrite", "line_number": 77, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 78, "usage_type": "call" }, { "api_name": "os.path", "line_number": 78, "usage_type": "attribute" } ]
497243580
from datetime import datetime
from GA import GA
from Colorharmony import ColorHarmony
import cv2
import numpy
import PIL as PIL
from PIL import ImageDraw, Image
from skimage.color import lab2rgb
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
import time


class Solver:  ## rename to Solver

    def get_num_pixels(filepath):
        width, height = PIL.Image.open(filepath).size
        return width * height

    def Skripsi(PosterInput, ScoreSebelum):
        t0 = time.time()
        #######################################################################################################
        ###          Mean-shift the image using OpenCV to reduce colors                                     ###
        #######################################################################################################
        # PosterInput = cv2.imread(Poster)
        Shape = PosterInput.shape
        flatImg = np.reshape(PosterInput, [-1, 3])
        bandwidth = estimate_bandwidth(flatImg, quantile=0.2, n_samples=100)
        ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
        ms.fit(flatImg)
        labels = ms.labels_
        cluster_centers = ms.cluster_centers_
        labels_unique = np.unique(labels)
        n_clusters_ = len(labels_unique)
        segmentedImg = cluster_centers[np.reshape(labels, Shape[:2])]
        cv2.imwrite("hasilmean.jpg", segmentedImg.astype(np.uint8))
        image = PIL.Image.open("hasilmean.jpg")
        color_count = {}
        width, height = image.size
        rgb_image = image.convert('RGB')
        imgtemp = cv2.imread("hasilmean.jpg")
        rows, cols, depth = imgtemp.shape
        # allocate R, G, B plus the X and Y positions
        temps = np.zeros((rows * cols, 5))
        k = 0
        # store first R, second G, third B, fourth the X position, fifth the Y position
        for p in range(0, rows):
            for f in range(0, cols):
                temps[k][0] = imgtemp[p][f][0]
                temps[k][1] = imgtemp[p][f][1]
                temps[k][2] = imgtemp[p][f][2]
                temps[k][3] = p
                temps[k][4] = f
                k = k + 1
        for x in range(width):
            for y in range(height):
                rgb = rgb_image.getpixel((x, y))
                if rgb in color_count:
                    color_count[rgb] += 1
                else:
                    color_count[rgb] = 1
        jumlahpixel = Solver.get_num_pixels("hasilmean.jpg")  # total pixel count

        # *************************************
        # ******** MAIN ALGORITHM CODE ********
        # *************************************
        maximum_generation = 1
        s, b, dom, domsmall = ColorHarmony.region(color_count, jumlahpixel)
        scoreawal = ColorHarmony.calculate_fitness(b, s) / 2

        # GENERATE BIG COLOR
        for generation in range(maximum_generation):
            new_population = []
            new_populationsmall = []
            # new_populationsmall.append(rgb2lab(domsmall))
            new_population.append(ColorHarmony.rgb2lab(dom))
            i = 0
            x = 0
            temp = False
            while i < 4:
                color = GA.select_initial_population(dom)
                new_population.append(color)
                i += 1
            pop = GA.bigGA(new_population, 0.5, dom)
            while x < 5:
                color = GA.select_initial_population(domsmall)
                new_populationsmall.append(color)
                x += 1
            poop = GA.smallGA(new_populationsmall, 1, domsmall)
            FitnessScore = ColorHarmony.calculate_fitness(pop, poop)
            ct = 0
            # The source condition read "... or ct > 10", which can never let the
            # loop stop once ct passes 10; an iteration cap is the apparent intent.
            while FitnessScore < 1.5 and FitnessScore > ScoreSebelum and ct <= 10:
                poop = GA.smallGA(new_populationsmall, 0.5, domsmall)
                FitnessScore = ColorHarmony.calculate_fitness(pop, poop)
                ct += 1
            w = 0
            rgb = []
            rgbBig = []
            v = 0
            while w < 5:
                color = (lab2rgb(poop[w], illuminant='D65', observer='2') * 254).astype(numpy.uint8)
                rgb.append(color)
                w += 1
            while v < 5:
                colorbig = (lab2rgb(pop[v], illuminant='D65', observer='2') * 254).astype(numpy.uint8)
                rgbBig.append(colorbig)
                v += 1
            im = PIL.Image.new('RGB', (300, 450), (255, 255, 255))
            draw = ImageDraw.Draw(im)
            s = 0
            while s < 5:
                draw.text((100, 10), "Your Primary Color", fill=(0, 0, 0))
                if (len(pop) > 0):
                    draw.rectangle((120, 50, 180, 120), fill=(tuple(dom)))
                else:
                    draw.rectangle((120, 50, 180, 120), fill=(tuple(domsmall)))
                draw.text((100, 130), "Primary Selection", fill=(0, 0, 0))
                draw.rectangle((s * 60, 150, 60 + (s * 60), 280), fill=(tuple(rgbBig[s])))
                temp = ColorHarmony.rgb2hex(rgbBig[s][0], rgbBig[s][1], rgbBig[s][2])
                draw.text((s * 60 + 10, 260), temp, fill=(0, 0, 0))
                draw.text((100, 300), "Secondary Selection", fill=(0, 0, 0))
                draw.rectangle((s * 60, 320, 60 + (s * 60), 450), fill=(tuple(rgb[s])))
                temps = ColorHarmony.rgb2hex(rgb[s][0], rgb[s][1], rgb[s][2])
                draw.text((s * 60 + 10, 430), temps, fill=(0, 0, 0))
                s += 1
            timestamp = datetime.timestamp(datetime.now())
            filename = 'web' + str(timestamp) + '.png'
            im.save('static/web' + str(timestamp) + '.png')
            print('Selesai')  # "Done"
        t1 = time.time()
        total = t1 - t0
        return filename, FitnessScore, total, scoreawal
null
Solver.py
Solver.py
py
5,526
python
en
code
null
code-starcoder2
83
[ { "api_name": "PIL.Image.open", "line_number": 19, "usage_type": "call" }, { "api_name": "PIL.Image", "line_number": 19, "usage_type": "attribute" }, { "api_name": "time.time", "line_number": 23, "usage_type": "call" }, { "api_name": "numpy.reshape", "line_number": 33, "usage_type": "call" }, { "api_name": "sklearn.cluster.estimate_bandwidth", "line_number": 35, "usage_type": "call" }, { "api_name": "sklearn.cluster.MeanShift", "line_number": 36, "usage_type": "call" }, { "api_name": "numpy.unique", "line_number": 44, "usage_type": "call" }, { "api_name": "numpy.reshape", "line_number": 47, "usage_type": "call" }, { "api_name": "cv2.imwrite", "line_number": 49, "usage_type": "call" }, { "api_name": "numpy.uint8", "line_number": 49, "usage_type": "attribute" }, { "api_name": "PIL.Image.open", "line_number": 51, "usage_type": "call" }, { "api_name": "PIL.Image", "line_number": 51, "usage_type": "attribute" }, { "api_name": "cv2.imread", "line_number": 56, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 61, "usage_type": "call" }, { "api_name": "Colorharmony.ColorHarmony.region", "line_number": 98, "usage_type": "call" }, { "api_name": "Colorharmony.ColorHarmony", "line_number": 98, "usage_type": "name" }, { "api_name": "Colorharmony.ColorHarmony.calculate_fitness", "line_number": 101, "usage_type": "call" }, { "api_name": "Colorharmony.ColorHarmony", "line_number": 101, "usage_type": "name" }, { "api_name": "Colorharmony.ColorHarmony.rgb2lab", "line_number": 108, "usage_type": "call" }, { "api_name": "Colorharmony.ColorHarmony", "line_number": 108, "usage_type": "name" }, { "api_name": "GA.GA.select_initial_population", "line_number": 113, "usage_type": "call" }, { "api_name": "GA.GA", "line_number": 113, "usage_type": "name" }, { "api_name": "GA.GA.bigGA", "line_number": 117, "usage_type": "call" }, { "api_name": "GA.GA", "line_number": 117, "usage_type": "name" }, { "api_name": "GA.GA.select_initial_population", "line_number": 119, "usage_type": "call" }, { "api_name": "GA.GA", "line_number": 119, "usage_type": "name" }, { "api_name": "GA.GA.smallGA", "line_number": 123, "usage_type": "call" }, { "api_name": "GA.GA", "line_number": 123, "usage_type": "name" }, { "api_name": "Colorharmony.ColorHarmony.calculate_fitness", "line_number": 124, "usage_type": "call" }, { "api_name": "Colorharmony.ColorHarmony", "line_number": 124, "usage_type": "name" }, { "api_name": "GA.GA.smallGA", "line_number": 127, "usage_type": "call" }, { "api_name": "GA.GA", "line_number": 127, "usage_type": "name" }, { "api_name": "Colorharmony.ColorHarmony.calculate_fitness", "line_number": 128, "usage_type": "call" }, { "api_name": "Colorharmony.ColorHarmony", "line_number": 128, "usage_type": "name" }, { "api_name": "skimage.color.lab2rgb", "line_number": 135, "usage_type": "call" }, { "api_name": "numpy.uint8", "line_number": 135, "usage_type": "attribute" }, { "api_name": "skimage.color.lab2rgb", "line_number": 140, "usage_type": "call" }, { "api_name": "numpy.uint8", "line_number": 140, "usage_type": "attribute" }, { "api_name": "PIL.Image.new", "line_number": 145, "usage_type": "call" }, { "api_name": "PIL.Image", "line_number": 145, "usage_type": "attribute" }, { "api_name": "PIL.ImageDraw.Draw", "line_number": 146, "usage_type": "call" }, { "api_name": "PIL.ImageDraw", "line_number": 146, "usage_type": "name" }, { "api_name": "Colorharmony.ColorHarmony.rgb2hex", "line_number": 157, "usage_type": "call" }, { "api_name": "Colorharmony.ColorHarmony", "line_number": 157, "usage_type": "name" 
}, { "api_name": "Colorharmony.ColorHarmony.rgb2hex", "line_number": 161, "usage_type": "call" }, { "api_name": "Colorharmony.ColorHarmony", "line_number": 161, "usage_type": "name" }, { "api_name": "datetime.datetime.timestamp", "line_number": 164, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 164, "usage_type": "name" }, { "api_name": "datetime.datetime.now", "line_number": 164, "usage_type": "call" }, { "api_name": "time.time", "line_number": 168, "usage_type": "call" } ]
8599063
#! python3
# Emails members whose dues for the latest month are unpaid.

import openpyxl, smtplib, sys

wb = openpyxl.load_workbook('duesRecords.xlsx')
sheet = wb.get_sheet_by_name('Sheet1')
lastCol = sheet.get_highest_column()
latestMonth = sheet.cell(row=1, column=lastCol).value

unpaidMembers = {}  # was missing in the source; the loop below raised NameError
for r in range(2, sheet.get_highest_row() + 1):
    payment = sheet.cell(row=r, column=lastCol).value
    if payment != 'paid':
        name = sheet.cell(row=r, column=1).value
        email = sheet.cell(row=r, column=2).value
        unpaidMembers[name] = email

smtpObj = smtplib.SMTP('smtp.gmail.com', 587)
smtpObj.ehlo()
smtpObj.starttls()  # required on port 587 before logging in
smtpObj.login('[email protected]', sys.argv[1])

for name, email in unpaidMembers.items():
    body = "Subject: %s dues unpaid. \nDear %s, \nRecords show that you have not paid dues for %s. Please make this payment." % (latestMonth, name, latestMonth)
    print('Sending email to %s...' % email)
    sendmailstatus = smtpObj.sendmail('[email protected]', email, body)

    if sendmailstatus != {}:
        print("There was a problem sending email to %s: %s" % (email, sendmailstatus))
smtpObj.quit()
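# Usage sketch -- the account password is passed on the command line
# ("swordfish" is a placeholder):
#   python email.py swordfish
# Caveat: a script named email.py can shadow the stdlib `email` package that
# smtplib imports when run from its own directory; renaming the file avoids it.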
null
Automatetheboringstuff/email.py
email.py
py
1,010
python
en
code
null
code-starcoder2
83
[ { "api_name": "openpyxl.load_workbook", "line_number": 5, "usage_type": "call" }, { "api_name": "smtplib.SMTP", "line_number": 20, "usage_type": "call" }, { "api_name": "sys.argv", "line_number": 22, "usage_type": "attribute" } ]
284830231
from django.shortcuts import render, redirect
from django.http import HttpResponse
from street_biter_app.models import Street_biter
from django.contrib.auth.decorators import login_required


@login_required
def home(request):
    if request.method == 'POST':
        # NB: Street_biter is a model, not a Form; the source returned nothing
        # on POST, so fall through and render the page in both cases.
        form = Street_biter(request.POST)
    return render(request, 'street_biter_app/home.html')


@login_required
def about(request):
    return render(request, 'street_biter_app/about.html')


@login_required
def details(request, id):
    # sweetspot = Street_biter.objects.filter(user = request.user, latitude=request.latitude, longitude=request.longitude).first()
    sweetspot = Street_biter.objects.get(id = id)
    return render(request, 'street_biter_app/details.html', {
        'sweetspot': sweetspot,
    })


def save_details(request):
    if request.method == 'POST':
        user = request.user
        latitude = request.POST['lat']
        longitude = request.POST['long']
        details = request.POST['details']
        species = request.POST['species-list']
        print(latitude)
        sweetspot = Street_biter.objects.create(
            user = user,
            latitude = latitude,
            longitude = longitude,
            details = details,
            species = species,
        )
        return redirect('details-view', sweetspot.id)
    else:
        return render(request, 'street_biter_app/app-home')


@login_required
def my_sweetspots(request):
    sweetspots = Street_biter.objects.filter(user = request.user)
    context = {
        'sweetspots': sweetspots
    }
    return render(request, 'street_biter_app/sweetspots.html', context)
null
street_biter_app/views.py
views.py
py
1,668
python
en
code
null
code-starcoder2
83
[ { "api_name": "street_biter_app.models.Street_biter", "line_number": 9, "usage_type": "call" }, { "api_name": "django.shortcuts.render", "line_number": 11, "usage_type": "call" }, { "api_name": "django.contrib.auth.decorators.login_required", "line_number": 6, "usage_type": "name" }, { "api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call" }, { "api_name": "django.contrib.auth.decorators.login_required", "line_number": 13, "usage_type": "name" }, { "api_name": "street_biter_app.models.Street_biter.objects.get", "line_number": 20, "usage_type": "call" }, { "api_name": "street_biter_app.models.Street_biter.objects", "line_number": 20, "usage_type": "attribute" }, { "api_name": "street_biter_app.models.Street_biter", "line_number": 20, "usage_type": "name" }, { "api_name": "django.shortcuts.render", "line_number": 21, "usage_type": "call" }, { "api_name": "django.contrib.auth.decorators.login_required", "line_number": 17, "usage_type": "name" }, { "api_name": "street_biter_app.models.Street_biter.objects.create", "line_number": 33, "usage_type": "call" }, { "api_name": "street_biter_app.models.Street_biter.objects", "line_number": 33, "usage_type": "attribute" }, { "api_name": "street_biter_app.models.Street_biter", "line_number": 33, "usage_type": "name" }, { "api_name": "django.shortcuts.redirect", "line_number": 40, "usage_type": "call" }, { "api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call" }, { "api_name": "street_biter_app.models.Street_biter.objects.filter", "line_number": 47, "usage_type": "call" }, { "api_name": "street_biter_app.models.Street_biter.objects", "line_number": 47, "usage_type": "attribute" }, { "api_name": "street_biter_app.models.Street_biter", "line_number": 47, "usage_type": "name" }, { "api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call" }, { "api_name": "django.contrib.auth.decorators.login_required", "line_number": 45, "usage_type": "name" } ]
213842796
import sys, os
import pandas as pd

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tfn.settings")
import django
django.setup()

from review.models import Whiskey, Distillery


def load_from_csv(row_of_csv):
    whiskey = Whiskey()
    whiskey.id = row_of_csv[0]
    # NB: the source uses column 0 for both the whiskey id and the distillery lookup
    whiskey.distillery = Distillery.objects.get(id=row_of_csv[0])
    whiskey.save()


if __name__ == "__main__":
    if len(sys.argv) == 2:
        print("Loading from file " + str(sys.argv[1]))
        base_df = pd.read_csv(sys.argv[1])
        print(base_df)
        base_df.apply(
            load_from_csv,
            axis=1
        )
        print("There are {} reviews in DB".format(Whiskey.objects.count()))
    else:
        print("Please, provide Reviews file path")
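# Usage sketch -- the script expects exactly one argument, a CSV path
# ("whiskeys.csv" is hypothetical):
#   python load_whiskey_base.py whiskeys.csv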
null
load_whiskey_base.py
load_whiskey_base.py
py
742
python
en
code
null
code-starcoder2
83
[ { "api_name": "os.environ.setdefault", "line_number": 4, "usage_type": "call" }, { "api_name": "os.environ", "line_number": 4, "usage_type": "attribute" }, { "api_name": "django.setup", "line_number": 7, "usage_type": "call" }, { "api_name": "review.models.Whiskey", "line_number": 13, "usage_type": "call" }, { "api_name": "review.models.Distillery.objects.get", "line_number": 15, "usage_type": "call" }, { "api_name": "review.models.Distillery.objects", "line_number": 15, "usage_type": "attribute" }, { "api_name": "review.models.Distillery", "line_number": 15, "usage_type": "name" }, { "api_name": "sys.argv", "line_number": 20, "usage_type": "attribute" }, { "api_name": "sys.argv", "line_number": 21, "usage_type": "attribute" }, { "api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call" }, { "api_name": "sys.argv", "line_number": 22, "usage_type": "attribute" }, { "api_name": "review.models.Whiskey.objects.count", "line_number": 30, "usage_type": "call" }, { "api_name": "review.models.Whiskey.objects", "line_number": 30, "usage_type": "attribute" }, { "api_name": "review.models.Whiskey", "line_number": 30, "usage_type": "name" } ]
524017942
import json
from collections import Counter

import requests
from bs4 import BeautifulSoup

GOOGLE_SCHOLAR = 'Google scholar'
HTTPS_SCHOLAR_GOOGLE_RU = 'https://scholar.google.ru'
EMPLOYERS_JSON = 'employers.json'
HTML_PARSER = 'html.parser'
JSON_INDENT = 2
LINKS = 'Ссылки'  # "Links" -- key used in the source JSON; kept as-is


def parse_scholar_articles():
    with open(EMPLOYERS_JSON, 'r', encoding='utf-8') as fp:
        employers = json.load(fp)
    employers_with_scholar_names = [
        employer for employer, employer_info in employers.items()
        if GOOGLE_SCHOLAR in employer_info[LINKS]
    ]
    for employer in employers_with_scholar_names:
        employer_scholar_link = employers[employer][LINKS][GOOGLE_SCHOLAR]
        scholar_response = requests.get(employer_scholar_link)
        bs = BeautifulSoup(scholar_response.text, HTML_PARSER)
        bs_articles = bs.select("#gsc_a_t .gsc_a_tr")
        articles = [parse_article(bs_article) for bs_article in bs_articles]
        co_authors = Counter(co_author for article in articles for co_author in article['authors'])
        employers[employer][GOOGLE_SCHOLAR] = articles
        employers[employer]['co-authors'] = co_authors
    with open(EMPLOYERS_JSON, 'w', encoding='utf-8') as fp:
        json.dump(employers, fp, ensure_ascii=False, indent=JSON_INDENT)


def parse_article(bs_article):
    try:
        bs_article_title = bs_article.select_one('.gsc_a_t a')
        bs_article_cited_by = bs_article.select_one('.gsc_a_c a')
        bs_article_year = bs_article.select_one('.gsc_a_y span')
        return {
            'title': str(bs_article_title.string),
            'link': HTTPS_SCHOLAR_GOOGLE_RU + bs_article_title['data-href'],
            'cited_by': int(bs_article_cited_by.string) if bs_article_cited_by.string is not None else 0,
            'year': int(bs_article_year.string) if bs_article_year.string is not None else -1,
            'authors': parse_authors(bs_article_title),
        }
    except AttributeError as e:
        print(e)


def parse_authors(bs_article):
    link = bs_article['data-href']
    article = requests.get(f'{HTTPS_SCHOLAR_GOOGLE_RU}{link}')
    bs_article = BeautifulSoup(article.text, HTML_PARSER)
    bs_authors = bs_article.select_one('.gsc_vcd_value')
    return bs_authors.string.split(', ')


if __name__ == '__main__':
    parse_scholar_articles()
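# Shape of employers.json as inferred from the lookups above (not a documented
# schema; the employer name and URL are placeholders):
#   {"Some Employer": {"Ссылки": {"Google scholar": "https://scholar.google.ru/..."}}}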
null
parse_scholar_articles.py
parse_scholar_articles.py
py
2,417
python
en
code
null
code-starcoder2
83
[ { "api_name": "json.load", "line_number": 17, "usage_type": "call" }, { "api_name": "requests.get", "line_number": 27, "usage_type": "call" }, { "api_name": "bs4.BeautifulSoup", "line_number": 28, "usage_type": "call" }, { "api_name": "collections.Counter", "line_number": 32, "usage_type": "call" }, { "api_name": "json.dump", "line_number": 37, "usage_type": "call" }, { "api_name": "requests.get", "line_number": 60, "usage_type": "call" }, { "api_name": "bs4.BeautifulSoup", "line_number": 61, "usage_type": "call" } ]
621016804
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm

to_process = []
with open("errors.txt", "r") as infile:
    to_process = [int(l.strip()) for l in infile.read().splitlines()]

for i in tqdm(to_process):
    # latest document as of Feb 2 2018
    # try:
    tqdm.write("Processing document #" + str(i))
    document = requests.get("http://www.presidency.ucsb.edu/ws/print.php?pid=" + str(i)).text
    soup = BeautifulSoup(document, 'html.parser')
    title = soup.title.contents[0].replace("\xa0", " ").replace("/", ":")
    if len(title) > 200:
        title = title[:97] + "..." + title[-100:]
    content = soup.find('span', {'class': 'style9'}).text
    with open(title + "." + str(i) + ".txt", "w") as outfile:
        outfile.write(content)
    # except Exception as e:
    #     print(e)
    #     with open("errors2.txt", "a") as outfile:
    #         outfile.write(str(i) + "\n")
    #     continue
null
website/static/css/scrape.py
scrape.py
py
963
python
en
code
null
code-starcoder2
83
[ { "api_name": "tqdm.tqdm", "line_number": 10, "usage_type": "call" }, { "api_name": "tqdm.tqdm.write", "line_number": 12, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 12, "usage_type": "name" }, { "api_name": "requests.get", "line_number": 13, "usage_type": "call" }, { "api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call" } ]
653602125
import discord
from discord.ext import commands, tasks
from transformers import AutoModelForCausalLM, AutoTokenizer, BigBirdForQuestionAnswering, BigBirdTokenizer
import torch

token = ''
client = commands.Bot(command_prefix='>')
channel = client.get_channel()  # NB: get_channel() takes a channel ID; the ID appears stripped from the source

# GPT MODEL
#tokenizer = AutoTokenizer.from_pretrained("Poly-Pixel/shrek-medium-full")
#model = AutoModelForCausalLM.from_pretrained("Poly-Pixel/shrek-medium-full")

# BIGBIRD MODEL
tokenizer = BigBirdTokenizer.from_pretrained("vasudevgupta/bigbird-roberta-natural-questions")
# BIGBIRD MODEL by default it's in `block_sparse` mode with num_random_blocks=3, block_size=64
model = BigBirdForQuestionAnswering.from_pretrained("vasudevgupta/bigbird-roberta-natural-questions")


@tasks.loop(seconds=300.0)
async def my_background_task():
    """Will loop every 300 seconds and change the bot's presence"""
    await client.change_presence(activity=discord.Game(name='About to 360 no scope outta here'))


@client.event
async def on_ready():
    print(f'Logged in as {client.user} (ID: {client.user.id})')
    print('------')
    # Waiting until the bot is ready
    await client.wait_until_ready()
    # Starting the loop
    my_background_task.start()


@client.event
async def on_message(message):
    if message.author == client.user:
        return

    #step = 1
    # GPT MODEL encode the new user input, add the eos_token and return a tensor in Pytorch
    #new_user_input_ids = tokenizer.encode(message.content + tokenizer.eos_token, return_tensors='pt')
    # GPT MODEL append the new user input tokens to the chat history
    #bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids
    # GPT MODEL generated a response while limiting the total chat history to 1000 tokens
    #chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # GPT MODEL pretty print last output tokens from bot
    #response = "{}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))

    print(message.content)
    # BIGBIRD MODEL
    encoded_input = tokenizer(message.content, return_tensors='pt')
    response = model(**encoded_input)
    await message.channel.send(response)
    return


client.run(token)
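# Note (an observation about the model class, not part of the source):
# BigBirdForQuestionAnswering returns start/end logits rather than text, so
# `response` above is a model-output object; turning it into a readable answer
# span would need an extra decoding step.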
null
Argo.py
Argo.py
py
2,230
python
en
code
null
code-starcoder2
83
[ { "api_name": "discord.ext.commands.Bot", "line_number": 7, "usage_type": "call" }, { "api_name": "discord.ext.commands", "line_number": 7, "usage_type": "name" }, { "api_name": "transformers.BigBirdTokenizer.from_pretrained", "line_number": 14, "usage_type": "call" }, { "api_name": "transformers.BigBirdTokenizer", "line_number": 14, "usage_type": "name" }, { "api_name": "transformers.BigBirdForQuestionAnswering.from_pretrained", "line_number": 16, "usage_type": "call" }, { "api_name": "transformers.BigBirdForQuestionAnswering", "line_number": 16, "usage_type": "name" }, { "api_name": "discord.Game", "line_number": 21, "usage_type": "call" }, { "api_name": "discord.ext.tasks.loop", "line_number": 18, "usage_type": "call" }, { "api_name": "discord.ext.tasks", "line_number": 18, "usage_type": "name" } ]
151796368
'''
Created on Jul 21, 2020

@author: vladislavkargin
'''

import numpy as np
from scipy import linalg as la
import matplotlib.pyplot as plt
import os

import rgs.pmaps.OrderedTree as ot
import rgs.mndrpy.Pairing as pg
import rgs.mndrpy.Meander as mr
import rgs.mndrpy.DyckPaths as dp
import rgs.mndrpy.Combinatorics as cmb
#from rgs.mndrpy.Meander import Meander


def genPairings(n):
    '''This is a generator that yields all (non-crossing) Pairings on [2n].
    Not all of them are different. The algorithm simply generates 2^{2n + 1}
    walks on Z, chooses those that sum to -1 (bridges) and rotates each of
    them to a Dyck path. Then it converts the path to a pairing.
    Of course it will be difficult to exhaust all paths for large n. '''
    y = 0
    width = 2 * n + 1
    while y < 2 ** width:
        walk = [2 * int(x) - 1 for x in '{:0{size}b}'.format(y, size=width)]
        if sum(walk) != -1:
            y = y + 1
            continue
        else:
            y = y + 1
            #do a cyclic shift.
            #Find the first occurrence of the minimum
            S = np.cumsum(np.array([0] + walk))
            m = np.min(S)
            positions = np.where(S == m)[0]
            pos = positions[0]
            walk1 = walk[pos:] + walk[:pos]
            del walk1[-1]
            yield pg.Pairing(path = walk1)


def allPairings(n):
    '''returns a list of all pairings of length 2n'''
    A = set([])
    for pairing in genPairings(n):
        A.add(pairing)
    return list(A)


def allMeanders(n):
    ''' returns all meander systems of length 2n. '''
    B = []
    A = allPairings(n)
    for p in A:
        for q in A:
            mndr = mr.Meander(p, q)
            B.append(mndr)
    return B


def dot(p, q):
    ''' calculates the dot product of two pairings, which is the number
    of cycles in the resulting meander.
    Parameters: p and q - pairings'''
    mndr = mr.Meander(p, q)
    cycles, _ = mndr.findCycles()
    c = len(cycles)
    return c


def dotMatrix(n):
    ''' calculate the matrix of all dot products among non-crossing
    pairings of length 2n.'''
    A = allPairings(n)
    N = len(A)
    M = np.zeros((N, N))
    for i in range(N):
        for j in range(N):
            M[i, j] = dot(A[i], A[j])
    return M


def randomTree(n, w, seed = None):
    ''' creates a random planar tree with weight sequence w '''
    if seed == None:
        seed = 0
    #OrderedTree uses a different convention
    #about the seed of the Random Generator
    tree = ot.randomTree(n, w, SEED = seed)
    return tree


def randomCrossMeander(n, seed = None):
    '''generate a random crossing meander'''
    if seed != None:
        np.random.seed(seed)
    uP = pg.randomCrossPairing(n)
    dP = pg.randomCrossPairing(n)
    mndr = mr.Meander(uP, dP)
    return mndr


def treeToDyckPath(tree):
    '''converts a planar rooted tree to a Dyck path'''
    A = [-1] * tree.graph.V
    dyckPath = []
    v = tree.graph.root
    stack = [v]
    A[v] = 0
    #dfsPath = []
    n = 1
    while (len(stack) > 0):
        #print("stack = " + str(stack))
        v = stack[-1]  #peeking at the top of the stack
        #print("processing v = " + str(v))
        flag = False  #can we find unprocessed neighbors?
        d = tree.graph.degree(v)
        for i in range(d):
            w = tree.graph.adj[v][i]
            if (A[w] < 0):  #this neighbor has not been explored previously
                A[w] = n
                n = n + 1
                stack.append(w)
                #print(dfsPath)
                dyckPath.append(1)
                flag = True
                break  #stop searching for an unexplored neighbor
            else:
                continue
        if (not flag):
            stack.pop()  #no unexplored neighbor around v. Remove it from consideration.
            dyckPath.append(-1)
    del dyckPath[-1]
    return dyckPath


def findQuasiClusters(mndr, max_gap_size = 3):
    ''' finds all quasi-cluster cycles in a meander
    parameter: meander
    returns: list of cluster cycles'''
    cycles, _ = mndr.findCycles()
    clusters = []
    for cycle in cycles:
        support = cycle.copy()
        support.sort()
        isQuasiCluster = True
        for i, x in enumerate(support):
            if i != len(support) - 1:
                gap = support[i + 1] - x
                if gap > max_gap_size:
                    isQuasiCluster = False
                    break
        if isQuasiCluster:
            clusters.append(cycle)
    return clusters


def largestCluster(mndr, max_gap_size = 3):
    ''' finds the largest quasi-cluster cycle in a meander.
    If there are several, finds and returns one of them'''
    clusters = findQuasiClusters(mndr, max_gap_size = max_gap_size)
    if clusters == []:
        return []
    lengths = [len(cycle) for cycle in clusters]
    i = lengths.index(max(lengths))
    largest = clusters[i]
    return largest


def largestClusterLength(mndr, max_gap_size = 3):
    largestCycle = largestCluster(mndr, max_gap_size = max_gap_size)
    return len(largestCycle)


def plotRandomProperMeander(n):
    '''gets a random proper meander and plots it'''
    max_iter = 100000  #maximum number of attempts to get a meander
    counter = 0
    while counter < max_iter:
        mndr = mr.randomMeander(n)
        if mndr.isProper():
            mndr.draw()
            return mndr
        else:
            counter = counter + 1
    print("Max number of attempts: ", max_iter, " exceeded.")
    print("Proper meander is not found")
    return None


def makeMeanderFromCycle(cycle):
    ''' creates a (crossing) meander from a cycle.
    For example, given a cycle (0, 5, 3, 2, 1, 4), which corresponds to
    upper arcs (0, 5), (3, 2) and (1, 4) and lower arcs (5, 3) (2, 1) and (4, 0),
    creates pairings [5 4 3 2 1 0] and [4 2 1 5 0 3] and the (crossing)
    meander based on these pairings.
    parameter: cycle, a list of permuted numbers from 0 to 2n - 1 that starts from 0
    returns: a meander
    '''
    if len(cycle) % 2 != 0:
        print("Error: the length of the cycle must be even.")
        return
    n = int(len(cycle) / 2)
    uprng = [0] * 2 * n
    dprng = [0] * 2 * n
    for i in range(n):
        uprng[cycle[2 * i]] = cycle[2 * i + 1]
        uprng[cycle[2 * i + 1]] = cycle[2 * i]
        dprng[cycle[2 * i + 1]] = cycle[(2 * i + 2) % (2 * n)]
        dprng[cycle[(2 * i + 2) % (2 * n)]] = cycle[2 * i + 1]
    mndr = mr.Meander(None, None, uprngArray = uprng, dprngArray = dprng)
    return mndr


''' For testing methods '''
def main():

    '''
    #Generates and prints a random tree (weighted)
    n = 100
    w = [1, 1, 1]
    tree = randomTree(n, w)
    print(tree)
    tree.draw(drawLabels = True)

    #Creates a random Dyck path and plots it
    #path = randomDyckPath(n)
    path = treeToDyckPath(tree)
    #print(path)
    S = np.cumsum([0] + path)
    fig1, ax1 = plt.subplots(nrows=1, ncols=1)
    ax1.plot(S)
    ax1.grid(True)
    ax1.xaxis.set_ticks(np.arange(0, S.shape[0]))
    ax1.yaxis.set_ticks(np.arange(0, np.max(S) + 1))

    #Generates a random meander, finds a random cluster cycle (all points
    #in the cycle are near each other) and prints them.
    n = 100
    mndr = mr.randomMeander(n)
    fig, ax = mndr.draw()
    clusters = findQuasiClusters(mndr)
    print("clusters = \n", clusters)

    #here it looks for the largest quasi cluster cycle -- some gaps are allowed.
    mgs = 9
    cycle = largestCluster(mndr, max_gap_size = mgs)
    print("Largest quasi cluster cycle is ", cycle)
    print("Its length is ", largestClusterLength(mndr, max_gap_size = mgs))
    mndr.drawCycle(cycle, ax = ax)
    '''

    #let us generate all non-crossing pairings of length n
    n = 6
    A = allPairings(n)
    print(len(A))
    '''
    p = [3, 2, 1, 0, 5, 4]
    q = [1, 0, 3, 2, 5, 4]
    mndr = Meander(p, q)
    mndr.draw()
    x = dot(p, q)
    print(x)
    '''
    M = dotMatrix(n)
    print(type(M))
    print(M)
    path = "/Users/vladislavkargin/Dropbox/Programs/forPapers/Meanders/"
    print(os.path.isdir(path))
    np.savetxt(path + "Matrix" + str(n) + ".csv", M.astype(int), delimiter = ',')
    w = la.eigvalsh(M)
    print(w)

    q = 0.5
    #q = 7/8
    Mq = q ** (n - M)
    print(Mq)
    w = la.eigvalsh(Mq)
    print("Eigenvalues: ", w)
    print("Maximum eigenvalue: ", max(w))
    print("Minimal Eigenvalue: ", min(w))
    print("Minimal Eigenvalue * C_n^2: ", min(w) * (Mq.shape[0] ** 2))
    print("Number of Negative Eigenvalues: ", (w[w < 0]).shape[0])
    print("Index: ", (w[w > 0]).shape[0] - (w[w < 0]).shape[0])
    plt.plot(w)
    plt.grid()

    '''
    I used this for an example in my Math 447 class.
    plotRandomProperMeander(5)
    plt.grid(True)
    '''

    '''This is for paper. It is now available as a notebook in Colab'''
    '''
    seed = 3
    n = 5
    path1 = randomDyckPath(n, seed = seed)
    plotDyckPath(path1)
    prng1 = pg.Pairing(path = path1)
    prng1.draw()
    area = areaDyckPath(path1)
    print("Area of path1 is ", area)

    path2 = randomDyckPath(n, seed = seed + 2)
    #plotDyckPath(path2, method = "lowerLatticePath")
    area = areaDyckPath(path2)
    print("Area of path2 is ", area)
    prng2 = pg.Pairing(path = path2)

    mndr = Meander(prng1, prng2)
    mndr.draw(drawCycles=True)
    mr.drawAsPolygon(mndr)
    '''

    seed = 3
    n = 10
    path1 = dp.randomDyckPath(n, seed = seed)
    dp.plotDyckPath(path1)
    area = dp.areaDyckPath(path1)
    print("Area of path1 is ", area)
    nvalleys = len(dp.valleys(path1))
    print("Number of valleys is ", nvalleys)

    n = 4
    np.random.seed()
    perm = np.random.permutation(n)
    print(perm)
    cycles = cmb.cyclesOfPerm(perm)
    print(cycles)

    plt.show()


if __name__ == '__main__':
    main()
null
rgs/mndrpy/Utility.py
Utility.py
py
10,004
python
en
code
null
code-starcoder2
83
[ { "api_name": "numpy.cumsum", "line_number": 40, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 40, "usage_type": "call" }, { "api_name": "numpy.min", "line_number": 41, "usage_type": "call" }, { "api_name": "numpy.where", "line_number": 42, "usage_type": "call" }, { "api_name": "rgs.mndrpy.Pairing.Pairing", "line_number": 46, "usage_type": "call" }, { "api_name": "rgs.mndrpy.Pairing", "line_number": 46, "usage_type": "name" }, { "api_name": "rgs.mndrpy.Meander.Meander", "line_number": 61, "usage_type": "call" }, { "api_name": "rgs.mndrpy.Meander", "line_number": 61, "usage_type": "name" }, { "api_name": "rgs.mndrpy.Meander.Meander", "line_number": 69, "usage_type": "call" }, { "api_name": "rgs.mndrpy.Meander", "line_number": 69, "usage_type": "name" }, { "api_name": "numpy.zeros", "line_number": 79, "usage_type": "call" }, { "api_name": "rgs.pmaps.OrderedTree.randomTree", "line_number": 92, "usage_type": "call" }, { "api_name": "rgs.pmaps.OrderedTree", "line_number": 92, "usage_type": "name" }, { "api_name": "numpy.random.seed", "line_number": 98, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 98, "usage_type": "attribute" }, { "api_name": "rgs.mndrpy.Pairing.randomCrossPairing", "line_number": 99, "usage_type": "call" }, { "api_name": "rgs.mndrpy.Pairing", "line_number": 99, "usage_type": "name" }, { "api_name": "rgs.mndrpy.Pairing.randomCrossPairing", "line_number": 100, "usage_type": "call" }, { "api_name": "rgs.mndrpy.Pairing", "line_number": 100, "usage_type": "name" }, { "api_name": "rgs.mndrpy.Meander.Meander", "line_number": 101, "usage_type": "call" }, { "api_name": "rgs.mndrpy.Meander", "line_number": 101, "usage_type": "name" }, { "api_name": "rgs.mndrpy.Meander.randomMeander", "line_number": 178, "usage_type": "call" }, { "api_name": "rgs.mndrpy.Meander", "line_number": 178, "usage_type": "name" }, { "api_name": "rgs.mndrpy.Meander.Meander", "line_number": 209, "usage_type": "call" }, { "api_name": "rgs.mndrpy.Meander", "line_number": 209, "usage_type": "name" }, { "api_name": "os.path.isdir", "line_number": 274, "usage_type": "call" }, { "api_name": "os.path", "line_number": 274, "usage_type": "attribute" }, { "api_name": "numpy.savetxt", "line_number": 275, "usage_type": "call" }, { "api_name": "scipy.linalg.eigvalsh", "line_number": 277, "usage_type": "call" }, { "api_name": "scipy.linalg", "line_number": 277, "usage_type": "name" }, { "api_name": "scipy.linalg.eigvalsh", "line_number": 285, "usage_type": "call" }, { "api_name": "scipy.linalg", "line_number": 285, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.plot", "line_number": 292, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 292, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.grid", "line_number": 293, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 293, "usage_type": "name" }, { "api_name": "rgs.mndrpy.DyckPaths.randomDyckPath", "line_number": 324, "usage_type": "call" }, { "api_name": "rgs.mndrpy.DyckPaths", "line_number": 324, "usage_type": "name" }, { "api_name": "rgs.mndrpy.DyckPaths.plotDyckPath", "line_number": 325, "usage_type": "call" }, { "api_name": "rgs.mndrpy.DyckPaths", "line_number": 325, "usage_type": "name" }, { "api_name": "rgs.mndrpy.DyckPaths.areaDyckPath", "line_number": 326, "usage_type": "call" }, { "api_name": "rgs.mndrpy.DyckPaths", "line_number": 326, "usage_type": "name" }, { "api_name": "rgs.mndrpy.DyckPaths.valleys", "line_number": 328, "usage_type": "call" }, { 
"api_name": "rgs.mndrpy.DyckPaths", "line_number": 328, "usage_type": "name" }, { "api_name": "numpy.random.seed", "line_number": 332, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 332, "usage_type": "attribute" }, { "api_name": "numpy.random.permutation", "line_number": 333, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 333, "usage_type": "attribute" }, { "api_name": "rgs.mndrpy.Combinatorics.cyclesOfPerm", "line_number": 335, "usage_type": "call" }, { "api_name": "rgs.mndrpy.Combinatorics", "line_number": 335, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.show", "line_number": 338, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 338, "usage_type": "name" } ]
115812409
import pytest

from ptera import BaseOverlay, Overlay, Recurrence, select, tag, tooled
from ptera.core import Capture, Tap
from ptera.selector import Element, parse
from ptera.selfless import default
from ptera.tools import every

from .common import one_test_per_assert


@tooled
def brie(x, y) -> tag.Fromage:
    """Brie is a sort of cheese."""
    a: tag.Bouffe = x * x
    b: "@Bouffe & @Agrement" = y * y
    return a + b


@tooled
def extra(cheese):
    return cheese + 1


@tooled
@tooled
@tooled
def double_brie(x1, y1):
    a = brie[[1]](x1, x1 + 1)
    b = brie[[2]](y1, y1 + 1)
    aa = extra[[1]](a)
    bb = extra[[2]](b)
    return aa + bb


@one_test_per_assert
def test_normal_call():
    assert brie(3, 4) == 25
    assert double_brie(3, 4) == 68


class GrabAll:
    def __init__(self, pattern):
        self.results = []
        pattern = select(pattern)

        def listener(**kwargs):
            self.results.append(
                {name: cap.values for name, cap in kwargs.items()}
            )

        self.rules = {pattern: {"listeners": listener}}


def _test(f, args, pattern):
    store = GrabAll(pattern)
    with BaseOverlay(store.rules):
        f(*args)
    return store.results


def _dbrie(pattern):
    return _test(double_brie, (2, 10), pattern)


@one_test_per_assert
def test_patterns():
    # Simple, test focus
    assert _dbrie("*(x)") == [{"x": [2]}, {"x": [10]}]
    assert _dbrie("*(!x)") == [{"x": [2]}, {"x": [10]}]
    assert _dbrie("*(!x, y)") == [{"x": [2], "y": [3]}, {"x": [10], "y": [11]}]
    assert _dbrie("*(x, y)") == [{"x": [2], "y": [3]}, {"x": [10], "y": [11]}]

    # Simple
    assert _dbrie("*(!a)") == [{"a": [4]}, {"a": [100]}, {"a": [13]}]
    assert _dbrie("brie(!a)") == [{"a": [4]}, {"a": [100]}]

    # Indirect
    assert _dbrie("a") == [{"a": [4]}, {"a": [100]}, {"a": [13]}]
    assert _dbrie("double_brie >> a") == [{"a": [13]}, {"a": [4]}, {"a": [100]}]
    assert _dbrie("double_brie >> x") == [{"x": [2]}, {"x": [10]}]

    # Multi-level
    assert _dbrie("double_brie(a) > brie(x)") == [{"a": [13], "x": [2, 10]}]
    assert _dbrie("double_brie(a) > brie(!x)") == [
        {"a": [13], "x": [2]},
        {"a": [13], "x": [10]},
    ]

    # Accumulate values across calls
    assert _dbrie("double_brie(extra(cheese), brie(x))") == [
        {"cheese": [13, 221], "x": [2, 10]}
    ]
    assert _dbrie("double_brie(extra(!cheese), brie(x))") == [
        {"cheese": [13], "x": [2, 10]},
        {"cheese": [221], "x": [2, 10]},
    ]

    # Indexing
    assert _dbrie("brie[[$i]](!a)") == [
        {"a": [4], "i": [1]},
        {"a": [100], "i": [2]},
    ]
    assert _dbrie("brie[[1]](!a)") == [{"a": [4]}]
    assert _dbrie("brie[[1.0]](!a)") == [{"a": [4]}]
    assert _dbrie("brie[[2]](!a)") == [{"a": [100]}]

    # Parameter
    assert _dbrie("brie($v:tag.Bouffe)") == [{"v": [4, 9]}, {"v": [100, 121]}]
    assert _dbrie("brie($v:@Bouffe)") == [{"v": [4, 9]}, {"v": [100, 121]}]
    assert _dbrie("brie(!$v:tag.Bouffe)") == [
        {"v": [4]},
        {"v": [9]},
        {"v": [100]},
        {"v": [121]},
    ]
    assert _dbrie("*(a) >> brie(!$v:tag.Bouffe)") == [
        {"a": [13], "v": [4]},
        {"a": [13], "v": [9]},
        {"a": [13], "v": [100]},
        {"a": [13], "v": [121]},
    ]

    # Function category
    assert _dbrie("*:tag.Fromage(a)") == [{"a": [4]}, {"a": [100]}]

    # Inexistent category
    assert _dbrie("brie > $x:tag.Xylophone") == []

    # Filter on value
    assert _dbrie("brie(!x, y, a=4)") == [{"x": [2], "y": [3]}]
    assert _dbrie("double_brie(x1=2) > brie > x") == [{"x": [2]}, {"x": [10]}]
    assert _dbrie("double_brie(#value=1234) > brie > x") == []


@tooled
def snapple(x):
    a = cabanana(x + 1)
    b = cabanana(x + 2)
    return a + b


@tooled
def cabanana(y):
    return peacherry(y + 1)


@tooled
def peacherry(z):
    return z + 1


def test_deep():
    assert _test(snapple, [5], "snapple > cabanana(y) > peacherry > z") == [
        {"y": [6], "z": [7]},
        {"y": [7], "z": [8]},
    ]


@tooled
def fib(n):
    f = Recurrence(2)
    f[0] = 1
    f[1] = 1
    for i in range(2, n + 1):
        f[i] = f[i - 1] + f[i - 2]
    return f[n]


even = every(2)


def test_match():
    res, fs = fib.using("f[~even] as x")(5)
    assert fs.map("x") == [1, 2, 5]

    res, fs = fib.using("f[$i ~ every(2)] as x")(5)
    assert fs.map("x") == [1, 2, 5]

    res, fs = fib.using("f[$i ~ every(2, start=1)] as x")(5)
    assert fs.map("x") == [1, 3, 8]

    res, fs = fib.using("f[$i ~ even] as x")(5)
    assert fs.map("i", "x") == [(0, 1), (2, 2), (4, 5)]


def test_indexing():
    assert fib(5) == 8

    res, fs = fib.using("f[0] as x")(5)
    assert fs.map("x") == [1]

    res, fs = fib.using("'f'(#key=0) as x")(5)
    assert fs.map("x") == [1]

    res, fs = fib.using("f[$i] as x")(5)
    intermediates = [1, 1, 2, 3, 5, 8]
    indices = list(range(6))
    assert fs.map("x") == intermediates
    assert fs.map("i") == indices
    assert fs.map("i", "x") == list(zip(indices, intermediates))


def test_indexing_2():
    res, fs = fib.full_tapping("fib(!n, f[3] as x)")(5)
    assert res == 8
    assert fs.map("n") == [5]
    assert fs.map("x") == [3]


def test_attach():
    _brie = brie.attach(hello=12).using("brie > #hello")
    res, hello = _brie(5, 6)
    assert hello.map("hello") == [12]


@tooled
def superbrie(n):
    result = 0
    k = 0
    for i in range(n):
        for j in range(n):
            result += brie[[i, j]](k, 2)
            k = k + 1
    return result


def test_function_indexing():
    assert superbrie(10) == 328750

    _, x = superbrie.using("brie[[1, $j]] > x")(10)
    assert x.map("j") == list(range(10))
    assert x.map("x") == list(range(10, 20))

    _, x = superbrie.using("brie[[1, $j ~ every(3)]] > x")(10)
    assert x.map("x") == list(range(10, 20, 3))


def test_immediate_evaluation():
    # This uses a GetterAccumulator
    ss = superbrie.rewriting({"superbrie(k=7) > brie > x": (lambda: 0)})
    assert ss(10) == 328701

    # This uses a GetterAccumulator
    ss = superbrie.rewriting({"superbrie(i=9) > brie > x": (lambda: 0)})
    assert ss(10) == 239365

    # By default this uses a TotalAccumulator, which requires every
    # value of i to be 1 and every j to be a multiple of 3
    _, x = superbrie.full_tapping("superbrie(i=1, j~every(3)) > brie > x")(10)
    assert x.map("x") == []

    # Creates a SetterAccumulator which only takes into account the values
    # of i and j at the moment the focus variable x is triggered
    _, x = superbrie.using(
        Tap("superbrie(i=1, j~every(3)) > brie > x", immediate=True)
    )(10)
    assert x.map("x") == list(range(10, 20, 3))


def test_nested_overlay():
    expectedx = [{"x": [2]}, {"x": [10]}]
    expectedy = [{"y": [3]}, {"y": [11]}]

    storex = GrabAll("brie > x")
    storey = GrabAll("brie > y")
    with BaseOverlay({**storex.rules, **storey.rules}):
        assert double_brie(2, 10) == 236
    assert storex.results == expectedx
    assert storey.results == expectedy

    storex = GrabAll("brie > x")
    storey = GrabAll("brie > y")
    with BaseOverlay(storex.rules):
        with BaseOverlay(storey.rules):
            assert double_brie(2, 10) == 236
    assert storex.results == expectedx
    assert storey.results == expectedy


@tooled
def mystery(hat):
    surprise: tag.MyStErY
    return surprise * hat


def test_provide_var():
    with BaseOverlay({"mystery(!surprise)": {"value": lambda surprise: 4}}):
        assert mystery(10) == 40

    with BaseOverlay(
        {"mystery(hat, !surprise)": {"value": lambda hat, surprise: hat.value}}
    ):
        assert mystery(8) == 64


def test_missing_var():
    try:
        mystery(3)
    except NameError as err:
        assert err.varname == "surprise"
        assert err.function == mystery
        info = err.info()
        assert info["annotation"] == tag.MyStErY

    with pytest.raises(NameError):
        mystery.tweaking({"mystery(hat=10) > surprise": 0})(3)


def test_tap_map():
    rval, acoll = double_brie.full_tapping("brie(!a, b)")(2, 10)
    assert acoll.map("a") == [4, 100]
    assert acoll.map("b") == [9, 121]
    assert acoll.map(lambda a, b: a + b) == [13, 221]
    assert acoll.map() == [{"a": 4, "b": 9}, {"a": 100, "b": 121}]
    assert acoll.map(lambda **kwargs: kwargs["a"] + kwargs["b"]) == [13, 221]


def test_tap_map_all():
    rval, acoll = double_brie.full_tapping("double_brie(!x1) >> brie(x)")(2, 10)
    with pytest.raises(ValueError):
        acoll.map("x1", "x")
    assert acoll.map_all("x1", "x") == [([2], [2, 10])]
    assert acoll.map_all() == [{"x1": [2], "x": [2, 10]}]


def test_tap_map_named():
    rval = double_brie.using(data="brie(!a, b)")(2, 10)
    assert rval.value == 236
    assert rval.data.map("a") == [4, 100]


def test_tap_map_full():
    rval, acoll = double_brie.using("brie > $param:tag.Bouffe")(2, 10)
    assert acoll.map_full(lambda param: param.value) == [4, 9, 100, 121]
    assert acoll.map_full(lambda param: param.name) == ["a", "b", "a", "b"]


def test_on():
    dbrie = double_brie.clone(return_object=True)

    @dbrie.on("brie > x")
    def minx(x):
        return -x

    @dbrie.on("brie > x", all=True)
    def minx_all(x):
        return [-v for v in x]

    @dbrie.on("brie > x", full=True)
    def minx_full(x):
        assert x.name == "x"
        return -x.value

    results = dbrie(2, 10)
    assert results.minx == [-2, -10]
    assert results.minx_all == [[-2], [-10]]
    assert results.minx_full == [-2, -10]


def test_use():
    dbrie = double_brie.clone(return_object=True)
    dbrie.use(data="brie(!a, b)")
    rval = dbrie(2, 10)
    assert rval.value == 236
    assert rval.data.map() == [{"a": 4}, {"a": 100}]


def test_full_tap():
    dbrie = double_brie.clone(return_object=True)
    dbrie.full_tap(data="brie(!a, b)")
    rval = dbrie(2, 10)
    assert rval.value == 236
    assert rval.data.map("a") == [4, 100]
    assert rval.data.map("b") == [9, 121]


def test_tweak():
    dbrie = double_brie.clone()
    dbrie.tweak({"brie > x": 10})
    assert dbrie(2, 10) == 332


def test_rewrite():
    dbrie = double_brie.clone()
    dbrie.rewrite({"brie(x, !y)": lambda x: x})
    assert dbrie(2, 10) == 210


def test_collect():
    dbrie = double_brie.clone(return_object=True)

    @dbrie.collect("brie > x")
    def sumx(xs):
        return sum(xs.map("x"))

    results = dbrie(2, 10)
    assert results.sumx == 12


@tooled
def square(x):
    rval = x * x
    return rval


@tooled
def sumsquares(x, y):
    xx = square(x)
    yy = square(y)
    rval = xx + yy
    return rval


def test_readme():
    results = sumsquares.using(q="x")(3, 4)
    assert results.q.map("x") == [3, 3, 4]

    results = sumsquares.using(q="square > x")(3, 4)
    assert results.q.map("x") == [3, 4]

    results = sumsquares.full_tapping(q="square(rval) > x")(3, 4)
    assert results.q.map("x", "rval") == [(3, 9), (4, 16)]

    results = sumsquares.full_tapping(
        q="sumsquares(x as ssx, y as ssy) > square(rval) > x"
    )(3, 4)
    assert results.q.map("ssx", "ssy", "x", "rval") == [
        (3, 4, 3, 9),
        (3, 4, 4, 16),
    ]

    results = sumsquares.full_tapping(
        q="sumsquares(!x as ssx, y as ssy) > square(rval, x)"
    )(3, 4)
    assert results.q.map_all("ssx", "ssy", "x", "rval") == [
        ([3], [4], [3, 4], [9, 16])
    ]

    result = sumsquares.tweaking({"square > rval": 0})(3, 4)
    assert result == 0

    result = sumsquares.rewriting({"square(x) > rval": lambda x: x + 1})(3, 4)
    assert result == 9


@tooled.defaults(x=10, y=20)
def vanilla(x, y):
    return x * y


def test_ptera_defaults():
    assert vanilla() == 200
    assert vanilla(4, 5) == 20


def test_capture():
    cap = Capture(parse("x"))
    assert cap.name == "x"
    with pytest.raises(ValueError):
        cap.value
    cap.accum("x", 1)
    assert cap.name == "x"
    assert cap.value == 1
    cap.accum("x", 2)
    with pytest.raises(ValueError):
        cap.value
    assert str(cap) == "Capture(sel(\"!x\"), ['x', 'x'], [1, 2])"

    cap = Capture(Element(name=None))
    with pytest.raises(ValueError):
        cap.name
    cap.accum("y", 7)
    assert cap.name == "y"
    assert cap.value == 7
    cap.accum("z", 31)
    with pytest.raises(ValueError):
        cap.name
    with pytest.raises(ValueError):
        cap.value


@tooled
def cake():
    flavour: tag.Flavour
    return f"This is a {flavour} cake"


@tooled
def fruitcake():
    my_cake = cake.new(flavour="fruit").clone(return_object=True)

    @my_cake.on("flavour")
    def yum(flavour):
        return flavour * 2

    return my_cake()


def test_listener_within_ptera():
    res = fruitcake()
    assert res.value == "This is a fruit cake"
    assert res.yum == ["fruitfruit"]


def test_doc():
    assert brie.__doc__ == """Brie is a sort of cheese."""


class Matou:
    def __init__(self, species):
        self.species = species

    @tooled
    def meow(self, repeat=1):
        ms = "m"
        es = "e"
        os = "o" * repeat
        ws = "w" * len(self.species)
        cry = ms + es + os + ws
        meows = [cry] * repeat
        return " ".join(meows)

    def meow_nodeco(self, repeat=1):
        ms = "m"
        es = "e"
        os = "o" * repeat
        ws = "w" * len(self.species)
        cry = ms + es + os + ws
        meows = [cry] * repeat
        return " ".join(meows)


def test_method():
    siamese = Matou("siamese")
    assert siamese.meow() == "meowwwwwww"
    assert siamese.meow.tweaking({"Matou.meow > es": "eee"})() == "meeeowwwwwww"

    with Overlay.tweaking({"Matou.meow > es": "eee"}):
        assert siamese.meow() == "meeeowwwwwww"

    with Overlay.tweaking({"Matou.meow > repeat": 2}):
        assert siamese.meow() == "meoowwwwwww meoowwwwwww"

    store = GrabAll("Matou.meow(repeat) > os")
    with BaseOverlay(store.rules):
        for i in range(3):
            siamese.meow(i)
    assert store.results == [
        {"os": [""], "repeat": [0]},
        {"os": ["o"], "repeat": [1]},
        {"os": ["oo"], "repeat": [2]},
    ]


def test_redirect_method():
    siamese = Matou("siamese")
    tooled.inplace(Matou.meow_nodeco)
    assert siamese.meow_nodeco() == "meowwwwwww"

    with Overlay.tweaking({"Matou.meow_nodeco > es": "eee"}):
        assert siamese.meow_nodeco() == "meeeowwwwwww"

    store = GrabAll("Matou.meow_nodeco(repeat) > os")
    with BaseOverlay(store.rules):
        for i in range(3):
            siamese.meow_nodeco(i)
    assert store.results == [
        {"os": [""], "repeat": [0]},
        {"os": ["o"], "repeat": [1]},
        {"os": ["oo"], "repeat": [2]},
    ]


def test_overlay():
    def twice_mystery(x):
        return mystery(x), mystery(x + 1)

    ov = Overlay()
    ov.tweak({"surprise": 2})

    @ov.on("mystery > hat")
    def hats(hat):
        return hat * hat

    @ov.on("mystery(hat) > surprise")
    def shats(surprise, hat):
        return (surprise, hat)

    with ov as results:
        assert twice_mystery(10) == (20, 22)

    assert results.hats == [100, 121]
    assert results.shats == [(2, 10), (2, 11)]


@tooled
def brooms(xs):
    rval = 0
    for i, x in enumerate(xs):
        rval = rval + (i + 1) * x
    return rval


def test_for_loop():
    assert brooms([1, 2, 3]) == 14
    assert brooms.tweaking({"i": 0})([1, 2, 3]) == 6


@tooled
def multitag():
    y: tag.Bouffe = default(10)
    y = y * y
    return y


def test_samevar_multitag():
    assert multitag() == 100

    with Overlay.tweaking({"y:tag.Bouffe": 5}):
        assert multitag() == 25

    with Overlay.tweaking({"y:tag.Irrelevant": 5}):
        assert multitag() == 100


def test_redirect():
    def funkykong(x):
        surf: tag.Surfboard = True
        return x * x if surf else x

    orig_funky = funkykong
    new_funky = tooled.inplace(funkykong)
    assert funkykong is orig_funky
    assert funkykong is new_funky

    assert funkykong(10) == 100
    with Overlay.tweaking({"surf:tag.Surfboard": False}):
        assert funkykong(10) == 10
    assert funkykong(10) == 100


def test_redirect_noclobber():
    def one():
        x = 1
        return x

    def two():
        x = 1
        return x * 2

    tooled.inplace(one)
    tooled.inplace(two)
    assert one() == 1
    assert two() == 2
    with Overlay.tweaking({"x": 7}):
        assert one() == 7
        assert two() == 14


def exposure(n):
    x = 2
    return n ** x


def test_redirect_global():
    old_exposure = exposure
    tooled.inplace(exposure)
    assert exposure(8) == 64
    with Overlay.tweaking({"x": 3}):
        assert exposure(8) == 512
    assert old_exposure is exposure
null
tests/test_core.py
test_core.py
py
16,697
python
en
code
null
code-starcoder2
83
[ { "api_name": "ptera.tag.Bouffe", "line_number": 15, "usage_type": "attribute" }, { "api_name": "ptera.tag", "line_number": 15, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 12, "usage_type": "name" }, { "api_name": "ptera.tag.Fromage", "line_number": 13, "usage_type": "attribute" }, { "api_name": "ptera.tag", "line_number": 13, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 20, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 25, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 26, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 27, "usage_type": "name" }, { "api_name": "common.one_test_per_assert", "line_number": 36, "usage_type": "name" }, { "api_name": "ptera.select", "line_number": 45, "usage_type": "call" }, { "api_name": "ptera.BaseOverlay", "line_number": 57, "usage_type": "call" }, { "api_name": "common.one_test_per_assert", "line_number": 66, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 136, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 143, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 148, "usage_type": "name" }, { "api_name": "ptera.Recurrence", "line_number": 162, "usage_type": "call" }, { "api_name": "ptera.tooled", "line_number": 160, "usage_type": "name" }, { "api_name": "ptera.tools.every", "line_number": 170, "usage_type": "call" }, { "api_name": "ptera.tooled", "line_number": 217, "usage_type": "name" }, { "api_name": "ptera.core.Tap", "line_number": 256, "usage_type": "call" }, { "api_name": "ptera.BaseOverlay", "line_number": 267, "usage_type": "call" }, { "api_name": "ptera.BaseOverlay", "line_number": 274, "usage_type": "call" }, { "api_name": "ptera.BaseOverlay", "line_number": 275, "usage_type": "call" }, { "api_name": "ptera.tag.MyStErY", "line_number": 283, "usage_type": "attribute" }, { "api_name": "ptera.tag", "line_number": 283, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 281, "usage_type": "name" }, { "api_name": "ptera.BaseOverlay", "line_number": 288, "usage_type": "call" }, { "api_name": "ptera.BaseOverlay", "line_number": 291, "usage_type": "call" }, { "api_name": "ptera.tag.MyStErY", "line_number": 304, "usage_type": "attribute" }, { "api_name": "ptera.tag", "line_number": 304, "usage_type": "name" }, { "api_name": "pytest.raises", "line_number": 306, "usage_type": "call" }, { "api_name": "pytest.raises", "line_number": 321, "usage_type": "call" }, { "api_name": "ptera.tooled", "line_number": 401, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 407, "usage_type": "name" }, { "api_name": "ptera.tooled.defaults", "line_number": 447, "usage_type": "call" }, { "api_name": "ptera.tooled", "line_number": 447, "usage_type": "name" }, { "api_name": "ptera.core.Capture", "line_number": 458, "usage_type": "call" }, { "api_name": "ptera.selector.parse", "line_number": 458, "usage_type": "call" }, { "api_name": "pytest.raises", "line_number": 460, "usage_type": "call" }, { "api_name": "pytest.raises", "line_number": 466, "usage_type": "call" }, { "api_name": "ptera.core.Capture", "line_number": 471, "usage_type": "call" }, { "api_name": "ptera.selector.Element", "line_number": 471, "usage_type": "call" }, { "api_name": "pytest.raises", "line_number": 472, "usage_type": "call" }, { "api_name": "pytest.raises", "line_number": 478, "usage_type": "call" }, { "api_name": "pytest.raises", "line_number": 480, "usage_type": "call" }, { "api_name": 
"ptera.tag.Flavour", "line_number": 486, "usage_type": "attribute" }, { "api_name": "ptera.tag", "line_number": 486, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 484, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 490, "usage_type": "name" }, { "api_name": "ptera.tooled", "line_number": 515, "usage_type": "name" }, { "api_name": "ptera.Overlay.tweaking", "line_number": 541, "usage_type": "call" }, { "api_name": "ptera.Overlay", "line_number": 541, "usage_type": "name" }, { "api_name": "ptera.Overlay.tweaking", "line_number": 544, "usage_type": "call" }, { "api_name": "ptera.Overlay", "line_number": 544, "usage_type": "name" }, { "api_name": "ptera.BaseOverlay", "line_number": 548, "usage_type": "call" }, { "api_name": "ptera.tooled.inplace", "line_number": 561, "usage_type": "call" }, { "api_name": "ptera.tooled", "line_number": 561, "usage_type": "name" }, { "api_name": "ptera.Overlay.tweaking", "line_number": 565, "usage_type": "call" }, { "api_name": "ptera.Overlay", "line_number": 565, "usage_type": "name" }, { "api_name": "ptera.BaseOverlay", "line_number": 569, "usage_type": "call" }, { "api_name": "ptera.Overlay", "line_number": 583, "usage_type": "call" }, { "api_name": "ptera.tooled", "line_number": 601, "usage_type": "name" }, { "api_name": "ptera.tag.Bouffe", "line_number": 616, "usage_type": "attribute" }, { "api_name": "ptera.tag", "line_number": 616, "usage_type": "name" }, { "api_name": "ptera.selfless.default", "line_number": 616, "usage_type": "call" }, { "api_name": "ptera.tooled", "line_number": 614, "usage_type": "name" }, { "api_name": "ptera.Overlay.tweaking", "line_number": 623, "usage_type": "call" }, { "api_name": "ptera.Overlay", "line_number": 623, "usage_type": "name" }, { "api_name": "ptera.Overlay.tweaking", "line_number": 625, "usage_type": "call" }, { "api_name": "ptera.Overlay", "line_number": 625, "usage_type": "name" }, { "api_name": "ptera.tag.Surfboard", "line_number": 631, "usage_type": "attribute" }, { "api_name": "ptera.tag", "line_number": 631, "usage_type": "name" }, { "api_name": "ptera.tooled.inplace", "line_number": 635, "usage_type": "call" }, { "api_name": "ptera.tooled", "line_number": 635, "usage_type": "name" }, { "api_name": "ptera.Overlay.tweaking", "line_number": 641, "usage_type": "call" }, { "api_name": "ptera.Overlay", "line_number": 641, "usage_type": "name" }, { "api_name": "ptera.tooled.inplace", "line_number": 655, "usage_type": "call" }, { "api_name": "ptera.tooled", "line_number": 655, "usage_type": "name" }, { "api_name": "ptera.tooled.inplace", "line_number": 656, "usage_type": "call" }, { "api_name": "ptera.tooled", "line_number": 656, "usage_type": "name" }, { "api_name": "ptera.Overlay.tweaking", "line_number": 661, "usage_type": "call" }, { "api_name": "ptera.Overlay", "line_number": 661, "usage_type": "name" }, { "api_name": "ptera.tooled.inplace", "line_number": 674, "usage_type": "call" }, { "api_name": "ptera.tooled", "line_number": 674, "usage_type": "name" }, { "api_name": "ptera.Overlay.tweaking", "line_number": 677, "usage_type": "call" }, { "api_name": "ptera.Overlay", "line_number": 677, "usage_type": "name" } ]
279687301
#!/usr/bin/env python

import time

import numpy as np
from mujoco_py import load_model_from_xml, MjSim, MjViewer

XML = '''
<mujoco>
    <worldbody>
        <geom name='floor' pos='0 0 0' size='5 5 .125' type='plane' condim='3'/>
        <body name='ball' pos='0 0 1'>
            <joint type='free'/>
            <geom name='ball' pos='0 0 0' size='.1' type='sphere' rgba='1 0 0 1'/>
        </body>
    </worldbody>
</mujoco>
'''

model = load_model_from_xml(XML)
sim = MjSim(model)
viewer = MjViewer(sim)
import pdb; pdb.set_trace()
while True:
    sim.model.opt.gravity[0] = np.sin(time.time())
    sim.model.opt.gravity[1] = np.cos(time.time())
    sim.step()
    viewer.render()
null
spinup/algos/ddpg_n_step/change_gravity_on_the_fly.py
change_gravity_on_the_fly.py
py
685
python
en
code
null
code-starcoder2
83
[ { "api_name": "mujoco_py.load_model_from_xml", "line_number": 20, "usage_type": "call" }, { "api_name": "mujoco_py.MjSim", "line_number": 21, "usage_type": "call" }, { "api_name": "mujoco_py.MjViewer", "line_number": 22, "usage_type": "call" }, { "api_name": "pdb.set_trace", "line_number": 23, "usage_type": "call" }, { "api_name": "numpy.sin", "line_number": 25, "usage_type": "call" }, { "api_name": "time.time", "line_number": 25, "usage_type": "call" }, { "api_name": "numpy.cos", "line_number": 26, "usage_type": "call" }, { "api_name": "time.time", "line_number": 26, "usage_type": "call" } ]
29648438
from datetime import datetime

from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Table, Column, Integer, String, DateTime, Sequence, ForeignKey, UniqueConstraint
from sqlalchemy.orm import relationship, backref
from sqlalchemy import event
from sqlalchemy import DDL

from ev_site import EVSite

Base = declarative_base()


class EVJPActress(Base):
    __tablename__ = 'ev_jp_actress'

    ev_actress_id = Column(Integer, Sequence('ev_actress_id_seq', start=10000001), primary_key=True)
    name = Column(String(30), index=True, unique=True, nullable=False)
    #name = Column(String(30), index=True, nullable=False)
    #name_kana = Column(String(30), default=None, nullable=False)
    #name_kana = Column(String(30), index=True, nullable=True)
    name_hiragana = Column(String(30), index=True, nullable=True)
    name_katakana = Column(String(30), index=True, nullable=True)
    #name_index = Column(String(1), index=True, nullable=True)
    name_index_hiragana = Column(String(1), index=True, nullable=True)
    name_index_katakana = Column(String(1), index=True, nullable=True)
    created = Column(DateTime(), default=datetime.now(), nullable=False)
    updated = Column(DateTime(), default=datetime.now(), nullable=False)

    #def __init__(self, name, name_hiragana, name_katakana, name_index_hiragana, name_index_katakana):
    #    self.name = name
    #    self.name_hiragana = name_hiragana
    #    self.name_katakana = name_katakana
    #    self.name_index_hiragana = name_index_hiragana
    #    self.name_index_katakana = name_index_katakana

event.listen(
    EVJPActress.__table__,
    'after_create',
    DDL("ALTER TABLE %(table)s AUTO_INCREMENT = 10000001;")
)
#event.listen(
#    EVJPActress.__table__,
#    'after_create',
#    DDL("ALTER TABLE %(table)s ADD UNIQUE (name, name_kana, name_index);")
#)


class EVJPActressImage(Base):
    __tablename__ = 'ev_jp_actress_image'

    ev_actress_image_id = Column(Integer, Sequence('ev_actress_image_id_seq', start=10000001), primary_key=True)
    #name = Column(String(50), index=True, unique=True, nullable=False)
    name = Column(String(50), index=True, nullable=False)
    ev_actress_id = Column(Integer, ForeignKey(EVJPActress.ev_actress_id), nullable=False)

    #ev_actress = relationship(EVJPActress, backref='ev_actress_images')
    ev_actress = relationship(EVJPActress, backref='images')

    def __init__(self, name):
        self.name = name

event.listen(
    EVJPActressImage.__table__,
    'after_create',
    DDL("ALTER TABLE %(table)s AUTO_INCREMENT = 10000001;")
)
event.listen(
    EVJPActressImage.__table__,
    'after_create',
    DDL("ALTER TABLE %(table)s ADD UNIQUE (name, ev_actress_id);")
)


class SiteJPActress(Base):
    __tablename__ = 'site_jp_actress'

    """
    ev_actress_id cannot be primary key because same actress names might be inserted
    """
    site_actress_id = Column(Integer, Sequence('site_actress_id_seq', start=10000001), primary_key=True)
    ev_actress_id = Column(Integer, ForeignKey(EVJPActress.ev_actress_id), index=True, nullable=False)
    name = Column(String(30), ForeignKey(EVJPActress.name), index=True, nullable=False)
    #name_kana = Column(String(30), ForeignKey(EVJPActress.name_kana), nullable=True)
    #name_kana = Column(String(30), ForeignKey(EVJPActress.name_kana), index=True, nullable=True)
    name_hiragana = Column(String(30), ForeignKey(EVJPActress.name_hiragana), index=True, nullable=True)
    name_katakana = Column(String(30), ForeignKey(EVJPActress.name_katakana), index=True, nullable=True)
    #name_index = Column(String(1), ForeignKey(EVJPActress.name_index), index=True, nullable=True)
    name_index_hiragana = Column(String(1), ForeignKey(EVJPActress.name_index_hiragana), index=True, nullable=True)
    name_index_katakana = Column(String(1), ForeignKey(EVJPActress.name_index_katakana), index=True, nullable=True)
    #site_id = Column(Integer, nullable=False)
    site_id = Column(Integer, ForeignKey(EVSite.site_id), nullable=False)
    created = Column(DateTime(), default=datetime.now(), nullable=False)
    updated = Column(DateTime(), default=datetime.now(), nullable=False)

    __mapper_args__ = {'polymorphic_on': site_id}

    ev_actress = relationship(EVJPActress,
        primaryjoin='and_(EVJPActress.ev_actress_id==SiteJPActress.ev_actress_id, \
            EVJPActress.name==SiteJPActress.name, \
            EVJPActress.name_hiragana==SiteJPActress.name_hiragana, \
            EVJPActress.name_katakana==SiteJPActress.name_katakana, \
            EVJPActress.name_index_hiragana==SiteJPActress.name_index_hiragana, \
            EVJPActress.name_index_katakana==SiteJPActress.name_index_katakana)',
        backref='site_actresses')

    def __init__(self, site_id):
        self.site_id = site_id

event.listen(
    SiteJPActress.__table__,
    'after_create',
    DDL("ALTER TABLE %(table)s AUTO_INCREMENT = 10000001;")
)
event.listen(
    SiteJPActress.__table__,
    'after_create',
    DDL("ALTER TABLE %(table)s ADD UNIQUE (name, site_id);")
)
#event.listen(
#    SiteJPActress.__table__,
#    'after_create',
#    DDL("ALTER TABLE %(table)s ADD UNIQUE (name, name_kana, name_index);")
#)


from erovideo.models.engine import engine
Base.metadata.create_all(engine)
null
erovideo/old/models.012013/old/ev_actress.010913.py
ev_actress.010913.py
py
5,524
python
en
code
null
code-starcoder2
83
[ { "api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 9, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call" }, { "api_name": "sqlalchemy.Integer", "line_number": 15, "usage_type": "argument" }, { "api_name": "sqlalchemy.Sequence", "line_number": 15, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call" }, { "api_name": "sqlalchemy.String", "line_number": 16, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call" }, { "api_name": "sqlalchemy.String", "line_number": 20, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call" }, { "api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call" }, { "api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call" }, { "api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call" }, { "api_name": "sqlalchemy.DateTime", "line_number": 25, "usage_type": "call" }, { "api_name": "datetime.datetime.now", "line_number": 25, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 25, "usage_type": "name" }, { "api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call" }, { "api_name": "sqlalchemy.DateTime", "line_number": 26, "usage_type": "call" }, { "api_name": "datetime.datetime.now", "line_number": 26, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 26, "usage_type": "name" }, { "api_name": "sqlalchemy.event.listen", "line_number": 36, "usage_type": "call" }, { "api_name": "sqlalchemy.event", "line_number": 36, "usage_type": "name" }, { "api_name": "sqlalchemy.DDL", "line_number": 39, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 52, "usage_type": "call" }, { "api_name": "sqlalchemy.Integer", "line_number": 52, "usage_type": "argument" }, { "api_name": "sqlalchemy.Sequence", "line_number": 52, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 54, "usage_type": "call" }, { "api_name": "sqlalchemy.String", "line_number": 54, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 55, "usage_type": "call" }, { "api_name": "sqlalchemy.Integer", "line_number": 55, "usage_type": "argument" }, { "api_name": "sqlalchemy.ForeignKey", "line_number": 55, "usage_type": "call" }, { "api_name": "sqlalchemy.orm.relationship", "line_number": 58, "usage_type": "call" }, { "api_name": "sqlalchemy.event.listen", "line_number": 64, "usage_type": "call" }, { "api_name": "sqlalchemy.event", "line_number": 64, "usage_type": "name" }, { "api_name": "sqlalchemy.DDL", "line_number": 67, "usage_type": "call" }, { "api_name": "sqlalchemy.event.listen", "line_number": 70, "usage_type": "call" }, { "api_name": "sqlalchemy.event", "line_number": 70, "usage_type": "name" }, { "api_name": "sqlalchemy.DDL", "line_number": 73, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 81, "usage_type": "call" }, { "api_name": "sqlalchemy.Integer", "line_number": 81, "usage_type": "argument" }, { "api_name": "sqlalchemy.Sequence", "line_number": 81, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 82, "usage_type": "call" }, { "api_name": "sqlalchemy.Integer", 
"line_number": 82, "usage_type": "argument" }, { "api_name": "sqlalchemy.ForeignKey", "line_number": 82, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 83, "usage_type": "call" }, { "api_name": "sqlalchemy.String", "line_number": 83, "usage_type": "call" }, { "api_name": "sqlalchemy.ForeignKey", "line_number": 83, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 86, "usage_type": "call" }, { "api_name": "sqlalchemy.String", "line_number": 86, "usage_type": "call" }, { "api_name": "sqlalchemy.ForeignKey", "line_number": 86, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 87, "usage_type": "call" }, { "api_name": "sqlalchemy.String", "line_number": 87, "usage_type": "call" }, { "api_name": "sqlalchemy.ForeignKey", "line_number": 87, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 89, "usage_type": "call" }, { "api_name": "sqlalchemy.String", "line_number": 89, "usage_type": "call" }, { "api_name": "sqlalchemy.ForeignKey", "line_number": 89, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 90, "usage_type": "call" }, { "api_name": "sqlalchemy.String", "line_number": 90, "usage_type": "call" }, { "api_name": "sqlalchemy.ForeignKey", "line_number": 90, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 92, "usage_type": "call" }, { "api_name": "sqlalchemy.Integer", "line_number": 92, "usage_type": "argument" }, { "api_name": "sqlalchemy.ForeignKey", "line_number": 92, "usage_type": "call" }, { "api_name": "ev_site.EVSite.site_id", "line_number": 92, "usage_type": "attribute" }, { "api_name": "ev_site.EVSite", "line_number": 92, "usage_type": "name" }, { "api_name": "sqlalchemy.Column", "line_number": 93, "usage_type": "call" }, { "api_name": "sqlalchemy.DateTime", "line_number": 93, "usage_type": "call" }, { "api_name": "datetime.datetime.now", "line_number": 93, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 93, "usage_type": "name" }, { "api_name": "sqlalchemy.Column", "line_number": 94, "usage_type": "call" }, { "api_name": "sqlalchemy.DateTime", "line_number": 94, "usage_type": "call" }, { "api_name": "datetime.datetime.now", "line_number": 94, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 94, "usage_type": "name" }, { "api_name": "sqlalchemy.orm.relationship", "line_number": 98, "usage_type": "call" }, { "api_name": "sqlalchemy.event.listen", "line_number": 111, "usage_type": "call" }, { "api_name": "sqlalchemy.event", "line_number": 111, "usage_type": "name" }, { "api_name": "sqlalchemy.DDL", "line_number": 114, "usage_type": "call" }, { "api_name": "sqlalchemy.event.listen", "line_number": 117, "usage_type": "call" }, { "api_name": "sqlalchemy.event", "line_number": 117, "usage_type": "name" }, { "api_name": "sqlalchemy.DDL", "line_number": 120, "usage_type": "call" }, { "api_name": "erovideo.models.engine.engine", "line_number": 131, "usage_type": "argument" } ]
293415917
from gensim.models import KeyedVectors
import pandas as pd
from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt


def show_words(words_list, lang_type):  # word labels, word vectors
    # load the converted word2vec file
    model = KeyedVectors.load_word2vec_format("./Word2Vec/%s_word2vec.txt" % lang_type, binary=False)
    # collect the output word vectors
    word_vectors = []
    for word in words_list:
        word_vectors.append(model.wv[word])  # load each word vector
    # t-SNE dimensionality reduction
    tsne = TSNE(n_components=2, perplexity=5, early_exaggeration=100, learning_rate=50, random_state=0, n_iter=10000, verbose=1)
    # run the reduction
    low_dim_embs = tsne.fit_transform(np.array(word_vectors))
    print(low_dim_embs.shape)
    # the transformed word vectors
    assert low_dim_embs.shape[0] == len(words_list)
    # set up the figure
    plt.figure(figsize=(6, 6))
    # plot each word
    for i, label in enumerate(words_list):
        x, y = low_dim_embs[i, :]
        plt.scatter(x, y)
        plt.annotate(label, xy=(x, y), xytext=(2, 4), textcoords='offset points', ha='right', va='bottom', fontsize=8)
    plt.savefig('%s_tsne.png' % lang_type)
    print('%s word-vector visualization finished!' % lang_type)


#------------------------- parameter configuration ----------------------------------
sqlang_type = 'sqlang'  # 93515 entries
# selected word list
sqlang_words = ['row','column','table','group','tagstr','tagint','codint','value','data','server','sql','database','if','with','from','for','to','on','in','into','between','or','and','but','where','what','when','which','order','by','of','as','like','set','select','insert','update','delete','join','inner','create','check','view','use','get','*','(',')',':','=']

csharp_type = 'csharp'  # 10548 entries
# selected word list
csharp_words = ['or','and','if','else','use','public','static','get','class','void','string','return','var','private','int','type','list','false','true','method','system','number','str','convert','sender','select','add','length','write','read','row','column','like','change','call','need','item','create','find','look','tagstr','codstr','tagint','codint','{','}','<','>','(',')']

javang_type = 'javang'  # 10548 entries
# selected word list
javang_words = ['abstract','assert','boolean','break','byte','case','catch','char','class','const','continue','default','do','double','else','enum','final','finally','float','for','goto','if','import','int','interface','long','native','new','package','private','public','return','short','static','super','switch','this','try','void','while','tagstr','codstr','tagint','codint','{','}','<','>','(',')']

python_type = 'python'  # 10548 entries
# selected word list
python_words = ['or','and','not','as','if','else','elif','str','int','float','list','tuple','dict','bool','set','false','none','true','assert','break','class','continue','del','for','range','in','global','from','import','is','lambda','nonlocal','pass','raise','def','return','try','except','finally','while','with','yield','tagstr','tagint','{','}','<','>','(',')']
#------------------------- parameter configuration ----------------------------------


if __name__ == '__main__':
    show_words(sqlang_words, sqlang_type)
    #show_words(csharp_words, csharp_type)
    #show_words(javang_words, javang_type)
    #show_words(python_words, python_type)
null
data/token_embed/visualization.py
visualization.py
py
3,285
python
en
code
null
code-starcoder2
83
[ { "api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 10, "usage_type": "call" }, { "api_name": "gensim.models.KeyedVectors", "line_number": 10, "usage_type": "name" }, { "api_name": "sklearn.manifold.TSNE", "line_number": 17, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 19, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.figure", "line_number": 26, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.scatter", "line_number": 30, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.annotate", "line_number": 31, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.savefig", "line_number": 32, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name" } ]
247955028
# load projection and helper functions
import numpy as np
import skymapper as skm
import matplotlib.pylab as plt

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __getitem__(self, i):
        if i == 0:
            return self.x
        elif i == 1:
            return self.y
        raise NotImplementedError

def getCatalog(size=10000, surveyname=None):
    # dummy catalog: uniform on sphere
    # Marsaglia (1972)
    xyz = np.random.normal(size=(size, 3))
    r = np.sqrt((xyz**2).sum(axis=1))
    dec = np.arccos(xyz[:,2]/r) / skm.DEG2RAD - 90
    ra = - np.arctan2(xyz[:,0], xyz[:,1]) / skm.DEG2RAD

    if surveyname is not None:
        from matplotlib.patches import Polygon
        # construct survey polygon
        ra_fp, dec_fp = skm.survey_register[surveyname].getFootprint()
        poly = Polygon(np.dstack((ra_fp, dec_fp))[0], closed=True)
        inside = [poly.get_path().contains_point(Point(ra_, dec_)) for (ra_, dec_) in zip(ra, dec)]
        ra = ra[inside]
        dec = dec[inside]
    return ra, dec

def makeHealpixMap(ra, dec, nside=1024, nest=False):
    # convert a ra/dec catalog into healpix map with counts per cell
    import healpy as hp
    ipix = hp.ang2pix(nside, (90-dec)/180*np.pi, ra/180*np.pi, nest=nest)
    return np.bincount(ipix, minlength=hp.nside2npix(nside))

def getHealpixCoords(pixels, nside, nest=False):
    # convert healpix cell indices to center ra/dec
    import healpy as hp
    theta, phi = hp.pix2ang(nside, pixels, nest=nest)
    return phi * 180. / np.pi, 90 - theta * 180. / np.pi

if __name__ == "__main__":

    # load RA/Dec from catalog
    size = 100000
    ra, dec = getCatalog(size, surveyname="DES")

    # define the best Albers projection for the footprint
    # minimizing the variation in distortion
    crit = skm.stdDistortion
    proj = skm.Albers.optimize(ra, dec, crit=crit)

    # construct map: will hold figure and projection
    # the outline of the sphere can be styled with kwargs for matplotlib Polygon
    map = skm.Map(proj)

    # add graticules, separated by 15 deg
    # the lines can be styled with kwargs for matplotlib Line2D
    # additional arguments for formatting the graticule labels
    sep = 15
    map.grid(sep=sep)

    # alter position of default labels at the outer meridians
    for m in [proj.ra_0 + 180, proj.ra_0 - 180]:
        map.labelParallelAtMeridian(m, verticalalignment='top', horizontalalignment='center')

    # remove labels at the south pole
    map.labelMeridianAtParallel(-90, meridians=[])

    # add footprint, retain the polygon for clipping
    footprint = map.footprint("DES", zorder=20, edgecolor='#2222B2', facecolor='None', lw=1)

    #### 1. plot density in healpix cells ####
    nside = 32
    mappable = map.density(ra, dec, nside=nside, clip_path=footprint)
    cb = map.colorbar(mappable, cb_label="$n$ [arcmin$^{-2}$]")

    # add random scatter plot
    len = 10
    size = 100*np.random.rand(len)
    map.scatter(ra[:len], dec[:len], s=size, edgecolor='k', facecolor='None')

    # focus on relevant region
    map.focus(ra, dec)

    # entitle: access mpl figure
    map.fig.suptitle('Density with random scatter')

    # copy map without data contents
    map2 = map.clone()
    footprint2 = map2.footprint("DES", zorder=20, edgecolor='#2222B2', facecolor='None', lw=1)

    #### 2. show map distortion over the survey ####
    a, b = proj.distortion(ra, dec)
    mappable2 = map2.interpolate(ra, dec, 1-np.abs(b/a), vmin=0, vmax=0.3, clip_path=footprint2)
    cb2 = map2.colorbar(mappable2, cb_label='Distortion')
    map2.fig.suptitle('Projection distortion')

    #### 3. extrapolate RA over all sky ####
    map3 = skm.Map(proj)

    # show with 45 deg graticules
    sep = 45
    map3.grid(sep=sep)

    # alter position of default labels at the outer meridians
    for m in [proj.ra_0 + 180, proj.ra_0 - 180]:
        map3.labelParallelAtMeridian(m, verticalalignment='top', horizontalalignment='center')

    # alter number of labels at the south pole
    map3.labelMeridianAtParallel(-90, size=8, meridians=np.arange(0, 360, 90))

    footprint3 = map3.footprint("DES", zorder=20, edgecolor='#2222B2', facecolor='None', lw=1)

    # this is slow when working with lots of samples...
    mappable3 = map3.extrapolate(ra[::10], dec[::10], dec[::10])
    cb3 = map3.colorbar(mappable3, cb_label='Dec')
    map3.fig.suptitle('Extrapolation on the sphere')

    #### 4. test Healpix map functions ####
    try:
        # simply bin the counts of ra/dec
        m = makeHealpixMap(ra, dec, nside=nside)

        map4 = map.clone()
        footprint4 = map4.footprint("DES", zorder=20, edgecolor='#2222B2', facecolor='None', lw=1)
        mappable4 = map4.healpix(m, clip_path=footprint4, cmap="YlOrRd")
        cb4 = map4.colorbar(mappable2, cb_label="Healpix cell count")
        map4.fig.suptitle('Healpix map')
    except ImportError:
        pass
null
examples/example1.py
example1.py
py
4,929
python
en
code
null
code-starcoder2
83
[ { "api_name": "numpy.random.normal", "line_number": 20, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 20, "usage_type": "attribute" }, { "api_name": "numpy.sqrt", "line_number": 21, "usage_type": "call" }, { "api_name": "numpy.arccos", "line_number": 22, "usage_type": "call" }, { "api_name": "skymapper.DEG2RAD", "line_number": 22, "usage_type": "attribute" }, { "api_name": "numpy.arctan2", "line_number": 23, "usage_type": "call" }, { "api_name": "skymapper.DEG2RAD", "line_number": 23, "usage_type": "attribute" }, { "api_name": "skymapper.survey_register", "line_number": 28, "usage_type": "attribute" }, { "api_name": "matplotlib.patches.Polygon", "line_number": 29, "usage_type": "call" }, { "api_name": "numpy.dstack", "line_number": 29, "usage_type": "call" }, { "api_name": "healpy.ang2pix", "line_number": 39, "usage_type": "call" }, { "api_name": "numpy.pi", "line_number": 39, "usage_type": "attribute" }, { "api_name": "numpy.bincount", "line_number": 40, "usage_type": "call" }, { "api_name": "healpy.nside2npix", "line_number": 40, "usage_type": "call" }, { "api_name": "healpy.pix2ang", "line_number": 45, "usage_type": "call" }, { "api_name": "numpy.pi", "line_number": 46, "usage_type": "attribute" }, { "api_name": "skymapper.stdDistortion", "line_number": 57, "usage_type": "attribute" }, { "api_name": "skymapper.Albers.optimize", "line_number": 58, "usage_type": "call" }, { "api_name": "skymapper.Albers", "line_number": 58, "usage_type": "attribute" }, { "api_name": "skymapper.Map", "line_number": 62, "usage_type": "call" }, { "api_name": "numpy.random.rand", "line_number": 87, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 87, "usage_type": "attribute" }, { "api_name": "numpy.abs", "line_number": 102, "usage_type": "call" }, { "api_name": "skymapper.Map", "line_number": 107, "usage_type": "call" }, { "api_name": "numpy.arange", "line_number": 118, "usage_type": "call" } ]
418785973
#
# Copyright (c) 2017-2021, The Storage Networking Industry Association.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of The Storage Networking Industry Association (SNIA) nor
# the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#

#fa_ports_api.py

import json, os
import shutil
import traceback
import logging

import g
import urllib3
from flask import jsonify, request
from flask_restful import Resource
from api_emulator.utils import update_collections_json, create_path, get_json_data, create_and_patch_object, delete_object, patch_object, put_object, delete_collection, create_collection
from .constants import *
from .templates.fabric_adapter_port import get_FabricAdapterPorts_instance

members = []
member_ids = []
config = {}
INTERNAL_ERROR = 500


# FabricAdapterPorts API
class FabricAdapterPortsAPI(Resource):
    def __init__(self, **kwargs):
        logging.info('FabricAdapterPortsAPI init called')
        self.root = PATHS['Root']
        self.systems = PATHS['Systems']['path']
        self.fabric_adapters = PATHS['Systems']['fabric_adapters']
        self.fabric_adapter_ports = PATHS['Systems']['fabric_adapter_ports']

    # HTTP GET
    def get(self, system, fabric_adapter, fabric_adapter_port):
        path = create_path(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports, fabric_adapter_port, 'index.json')
        return get_json_data(path)

    # HTTP POST
    # - Create the resource (since URI variables are available)
    # - Update the members and members.id lists
    # - Attach the APIs of subordinate resources (do this only once)
    # - Finally, create an instance of the subordinate resources
    def post(self, system, fabric_adapter, fabric_adapter_port):
        logging.info('FabricAdapterPortsAPI POST called')
        path = create_path(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports, fabric_adapter_port)
        collection_path = os.path.join(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports, 'index.json')

        # Check if collection exists:
        if not os.path.exists(collection_path):
            FabricAdapterPortsCollectionAPI.post(self, system, fabric_adapter)

        if fabric_adapter in members:
            resp = 404
            return resp
        try:
            global config
            wildcards = {'s_id': system, 'fa_id': fabric_adapter, 'fap_id': fabric_adapter_port, 'rb': g.rest_base}
            config = get_FabricAdapterPorts_instance(wildcards)
            config = create_and_patch_object(config, members, member_ids, path, collection_path)

            # Add default placeholder collections to instance.
            FabricAdapterPortsCollectionAPI.post(self, system, fabric_adapter)
            resp = config, 200

        except Exception:
            traceback.print_exc()
            resp = INTERNAL_ERROR
        logging.info('FabricAdapterPortsAPI POST exit')
        return resp

    # HTTP PATCH
    def patch(self, system, fabric_adapter, fabric_adapter_port):
        path = os.path.join(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports, fabric_adapter_port, 'index.json')
        patch_object(path)
        return self.get(system, fabric_adapter, fabric_adapter_port)

    # HTTP PUT
    def put(self, system, fabric_adapter, fabric_adapter_port):
        path = os.path.join(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports, fabric_adapter_port, 'index.json')
        put_object(path)
        return self.get(system, fabric_adapter, fabric_adapter_port)

    # HTTP DELETE
    def delete(self, system, fabric_adapter, fabric_adapter_port):
        path = create_path(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports, fabric_adapter_port)
        base_path = create_path(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports)
        return delete_object(path, base_path)


# FabricAdapterPorts Collection API
class FabricAdapterPortsCollectionAPI(Resource):
    def __init__(self):
        self.root = PATHS['Root']
        self.systems = PATHS['Systems']['path']
        self.fabric_adapters = PATHS['Systems']['fabric_adapters']
        self.fabric_adapter_ports = PATHS['Systems']['fabric_adapter_ports']

    def get(self, system, fabric_adapter):
        path = os.path.join(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports, 'index.json')
        return get_json_data(path)

    def verify(self, config):
        # TODO: Implement a method to verify that the POST body is valid
        return True, {}

    # HTTP POST
    # POST should allow adding multiple instances to a collection.
    # For now, this only adds one instance.
    # TODO: 'id' should be obtained from the request data.
    def post(self, system, fabric_adapter):
        logging.info('FabricAdapterPortsCollectionAPI POST called')
        self.root = PATHS['Root']
        self.systems = PATHS['Systems']['path']
        self.fabric_adapters = PATHS['Systems']['fabric_adapters']
        self.fabric_adapter_ports = PATHS['Systems']['fabric_adapter_ports']

        if fabric_adapter in members:
            resp = 404
            return resp
        path = create_path(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports)
        return create_collection(path, 'Port')

    # HTTP PUT
    def put(self, system, fabric_adapter):
        path = os.path.join(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports, 'index.json')
        put_object(path)
        return self.get(system, fabric_adapter)

    # HTTP DELETE
    def delete(self, system, fabric_adapter):
        # Set path to object, then call delete_object:
        path = create_path(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports)
        base_path = create_path(self.root, self.systems, system, self.fabric_adapters, fabric_adapter)
        return delete_collection(path, base_path)


class CreateFabricAdapterPorts(Resource):
    def __init__(self):
        self.root = PATHS['Root']
        self.systems = PATHS['Systems']['path']
        self.fabric_adapters = PATHS['Systems']['fabric_adapters']
        self.fabric_adapter_ports = PATHS['Systems']['fabric_adapter_ports']

    # Attach APIs for subordinate resource(s). Attach the APIs for a resource collection and its singletons
    def put(self, system, fabric_adapter):
        logging.info('CreateFabricAdapterPorts put started.')
        try:
            path = create_path(self.root, self.systems, system, self.fabric_adapters, fabric_adapter, self.fabric_adapter_ports)
            if not os.path.exists(path):
                os.mkdir(path)
            else:
                logging.info('The given path : {} already exists.'.format(path))
            config = {
                "@Redfish.Copyright": "Copyright 2015-2021 SNIA. All rights reserved.",
                "@odata.id": "/redfish/v1/Systems/{system}/FabricAdapters/{fabric_adapter}/Ports",
                "@odata.type": "#PortCollection.PortCollection",
                "Name": "Port Collection",
                "Members@odata.count": 0,
                "Members": [
                ]
            }
            with open(os.path.join(path, "index.json"), "w") as fd:
                fd.write(json.dumps(config, indent=4, sort_keys=True))
            resp = config, 200
        except Exception:
            traceback.print_exc()
            resp = INTERNAL_ERROR
        logging.info('CreateFabricAdapterPorts put exit.')
        return resp
null
api_emulator/redfish/fa_ports_api.py
fa_ports_api.py
py
9,153
python
en
code
null
code-starcoder2
83
[ { "api_name": "flask_restful.Resource", "line_number": 53, "usage_type": "name" }, { "api_name": "logging.info", "line_number": 55, "usage_type": "call" }, { "api_name": "api_emulator.utils.create_path", "line_number": 63, "usage_type": "call" }, { "api_name": "api_emulator.utils.get_json_data", "line_number": 64, "usage_type": "call" }, { "api_name": "logging.info", "line_number": 72, "usage_type": "call" }, { "api_name": "api_emulator.utils.create_path", "line_number": 73, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 74, "usage_type": "call" }, { "api_name": "os.path", "line_number": 74, "usage_type": "attribute" }, { "api_name": "os.path.exists", "line_number": 77, "usage_type": "call" }, { "api_name": "os.path", "line_number": 77, "usage_type": "attribute" }, { "api_name": "g.rest_base", "line_number": 85, "usage_type": "attribute" }, { "api_name": "templates.fabric_adapter_port.get_FabricAdapterPorts_instance", "line_number": 86, "usage_type": "call" }, { "api_name": "api_emulator.utils.create_and_patch_object", "line_number": 87, "usage_type": "call" }, { "api_name": "traceback.print_exc", "line_number": 93, "usage_type": "call" }, { "api_name": "logging.info", "line_number": 95, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 100, "usage_type": "call" }, { "api_name": "os.path", "line_number": 100, "usage_type": "attribute" }, { "api_name": "api_emulator.utils.patch_object", "line_number": 101, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 106, "usage_type": "call" }, { "api_name": "os.path", "line_number": 106, "usage_type": "attribute" }, { "api_name": "api_emulator.utils.put_object", "line_number": 107, "usage_type": "call" }, { "api_name": "api_emulator.utils.create_path", "line_number": 112, "usage_type": "call" }, { "api_name": "api_emulator.utils.create_path", "line_number": 113, "usage_type": "call" }, { "api_name": "api_emulator.utils.delete_object", "line_number": 114, "usage_type": "call" }, { "api_name": "flask_restful.Resource", "line_number": 118, "usage_type": "name" }, { "api_name": "os.path.join", "line_number": 127, "usage_type": "call" }, { "api_name": "os.path", "line_number": 127, "usage_type": "attribute" }, { "api_name": "api_emulator.utils.get_json_data", "line_number": 128, "usage_type": "call" }, { "api_name": "logging.info", "line_number": 139, "usage_type": "call" }, { "api_name": "api_emulator.utils.create_path", "line_number": 148, "usage_type": "call" }, { "api_name": "api_emulator.utils.create_collection", "line_number": 149, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 153, "usage_type": "call" }, { "api_name": "os.path", "line_number": 153, "usage_type": "attribute" }, { "api_name": "api_emulator.utils.put_object", "line_number": 154, "usage_type": "call" }, { "api_name": "api_emulator.utils.create_path", "line_number": 160, "usage_type": "call" }, { "api_name": "api_emulator.utils.create_path", "line_number": 161, "usage_type": "call" }, { "api_name": "api_emulator.utils.delete_collection", "line_number": 162, "usage_type": "call" }, { "api_name": "flask_restful.Resource", "line_number": 164, "usage_type": "name" }, { "api_name": "logging.info", "line_number": 173, "usage_type": "call" }, { "api_name": "api_emulator.utils.create_path", "line_number": 175, "usage_type": "call" }, { "api_name": "os.path.exists", "line_number": 176, "usage_type": "call" }, { "api_name": "os.path", "line_number": 176, "usage_type": "attribute" }, { "api_name": "os.mkdir", 
"line_number": 177, "usage_type": "call" }, { "api_name": "logging.info", "line_number": 179, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 190, "usage_type": "call" }, { "api_name": "os.path", "line_number": 190, "usage_type": "attribute" }, { "api_name": "json.dumps", "line_number": 191, "usage_type": "call" }, { "api_name": "traceback.print_exc", "line_number": 195, "usage_type": "call" }, { "api_name": "logging.info", "line_number": 197, "usage_type": "call" } ]
217053346
import json
import time
from itertools import product
from random import randint
from pprint import pprint
import multiprocessing


def build_machines(m=3):
    # Create machines
    A = []
    for i in range(m):
        A.append([])
    return A


def generate_jobs(n=10, min_time=1, max_time=10):
    # Generate a list of random jobs
    J = []
    for i in range(n):
        t = randint(min_time, max_time)
        J.append(t)
    return J


# Utilities
def makespan(A):
    return max(sum(Ai) for Ai in A)


def find_least_busy_machine(A):
    # First, get totals
    totals = (sum(Ai) for Ai in A)
    val, idx = min((val, idx) for (idx, val) in enumerate(totals))
    return idx


# online
def online(m, J):
    # Create machines and jobs
    A = build_machines(m)
    # pprint("Machines: {}".format(A))
    # pprint("Jobs: {}".format(J))
    # pprint("Makespan: {}".format(makespan(A)))

    # Let's assume we take one job at a time
    for j in J:
        # Find the least busy machine
        lbm = find_least_busy_machine(A)
        # pprint("Least busy machine: {}".format(lbm))
        # And assign the job there
        A[lbm].append(j)

    # pprint("Result Machines: {}".format(A))
    # pprint("Final Makespan: {}".format(makespan(A)))
    return A


def offline(m, J):
    # Now, we need to explore all combinations
    def find_combinations(n_machines, n_jobs):
        # Create iterables
        ms = range(n_machines)
        js = range(n_jobs)
        # Create combinations between each machine and
        # job
        combs = [product(ms, [ji]) for ji in js]
        # And yield combinations
        i = 0
        for c in product(*combs):
            i += 1
            # if i == 100:
            #     break
            # print c
            yield c

    # Get the generator
    gen = find_combinations(m, len(J))

    # Now, keep track of the best makespan found
    best = float('inf')
    best_comb = None

    # Generate machine counters
    m_range = range(m)
    M = [0 for _ in m_range]

    # Iterate over all combinations
    for c in gen:
        # print c
        # Pick each assignment
        for machine, job in c:
            # Add the value to the matrix
            M[machine] += J[job]

        # Makespan
        m = max(M)

        # Keep if better
        if m < best:
            best = m
            best_comb = c

        # Reset M
        for i in m_range:
            M[i] = 0

    # Generate A
    A = [[] for _ in m_range]
    for machine, job in best_comb:
        A[machine].append(J[job])
    return A


def show(A):
    for i, Ai in enumerate(A):
        print("{}: {}".format(i, '|' * sum(Ai)))


def run(p):
    m, n = p
    J = generate_jobs(n)
    online_A = online(m, J)
    offline_A = offline(m, J)
    return (makespan(online_A), makespan(offline_A))


def test(m, n):
    m = 2
    n = 10
    J = generate_jobs(n)
    print("Number of machines: {}".format(m))
    print("Number of jobs: {}".format(n))
    print("Jobs: {}".format(J))
    online_A = online(m, J)
    offline_A = offline(m, J)
    print("Offline Makespan: {}".format(makespan(offline_A)))
    print(offline_A)
    show(offline_A)
    print("Online Makespan: {}".format(makespan(online_A)))
    print(online_A)
    show(online_A)
    assert makespan(offline_A) <= makespan(online_A)


if __name__ == '__main__':
    m = 3
    n = 10
    N = 10000

    # Use None to pick the max number
    # of processors
    processors = None
    # processors = 4

    # Print info
    print("Number of machines: {}".format(m))
    print("Number of jobs: {}".format(n))
    print("Executions: {}".format(N))

    # Create a pool of workers
    p = multiprocessing.Pool(processes=processors)
    # And collect results
    results = p.map(run, ((m, n) for _ in range(N)))
    # pprint(results)

    # Dump results
    dump_folder = 'dumps/'
    file_name = 'results-%s.json' % time.time()
    r = {
        'm': m,
        'n': n,
        'N': N,
        'data': results
    }
    with open(dump_folder + file_name, 'w') as f:
        json.dump(r, f, indent=2)
null
main.py
main.py
py
3,551
python
en
code
null
code-starcoder2
83
[ { "api_name": "random.randint", "line_number": 22, "usage_type": "call" }, { "api_name": "itertools.product", "line_number": 69, "usage_type": "call" }, { "api_name": "itertools.product", "line_number": 73, "usage_type": "call" }, { "api_name": "multiprocessing.Pool", "line_number": 170, "usage_type": "call" }, { "api_name": "time.time", "line_number": 180, "usage_type": "call" }, { "api_name": "json.dump", "line_number": 190, "usage_type": "call" } ]
10072179
import tkinter as tk
from tkinter import *
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
from keras.models import model_from_json
import cv2
import numpy as np
from PIL import Image, ImageTk

top = tk.Tk()
top.minsize(100, 100)
top.geometry('500x500')
top.maxsize(600, 600)
top.configure(bg='black')


def OpenFile():
    name = askopenfilename(initialdir="C:/Users/Ketan Ingale/Documents/SDL_Project/Dataset/test",
                           filetypes=(("JPG File", "*.jpg"), ("All Files", "*.*")),
                           title="Choose a file."
                           )
    print(name)

    json_file = open('C:/Users/Ketan Ingale/Documents/SDL_Project/model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()

    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights("C:/Users/Ketan Ingale/Documents/SDL_Project/model.h5")

    loaded_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

    img = cv2.imread(name)
    img = cv2.resize(img, (50, 50))
    img = img.reshape(1, 50, 50, 3)

    image = ImageTk.PhotoImage(Image.open(name))

    if loaded_model.predict(img) == 1:
        helloCallBack("It's a dog.", name)
    elif loaded_model.predict(img) == 0:
        helloCallBack("It's a cat", name)
    else:
        helloCallBack("It's nothing like from the dataset.", name)


def helloCallBack(str, name):
    image = ImageTk.PhotoImage(Image.open(name))
    tk.Label(top, image=image).pack()
    msg = messagebox.showinfo("Result : ", str)


B = tk.Button(top, text="Upload Image", command=OpenFile, highlightthickness=0, bd=0, fg='white', bg='black')
B.place(relx=0.5, rely=0.5, anchor=CENTER)
top.mainloop()
null
Cat_and_Dog_Classifier.py
Cat_and_Dog_Classifier.py
py
1,770
python
en
code
null
code-starcoder2
83
[ { "api_name": "tkinter.Tk", "line_number": 12, "usage_type": "call" }, { "api_name": "tkinter.filedialog.askopenfilename", "line_number": 20, "usage_type": "call" }, { "api_name": "keras.models.model_from_json", "line_number": 29, "usage_type": "call" }, { "api_name": "cv2.imread", "line_number": 35, "usage_type": "call" }, { "api_name": "cv2.resize", "line_number": 36, "usage_type": "call" }, { "api_name": "PIL.ImageTk.PhotoImage", "line_number": 38, "usage_type": "call" }, { "api_name": "PIL.ImageTk", "line_number": 38, "usage_type": "name" }, { "api_name": "PIL.Image.open", "line_number": 38, "usage_type": "call" }, { "api_name": "PIL.Image", "line_number": 38, "usage_type": "name" }, { "api_name": "PIL.ImageTk.PhotoImage", "line_number": 48, "usage_type": "call" }, { "api_name": "PIL.ImageTk", "line_number": 48, "usage_type": "name" }, { "api_name": "PIL.Image.open", "line_number": 48, "usage_type": "call" }, { "api_name": "PIL.Image", "line_number": 48, "usage_type": "name" }, { "api_name": "tkinter.Label", "line_number": 49, "usage_type": "call" }, { "api_name": "tkinter.messagebox.showinfo", "line_number": 50, "usage_type": "call" }, { "api_name": "tkinter.messagebox", "line_number": 50, "usage_type": "name" }, { "api_name": "tkinter.Button", "line_number": 52, "usage_type": "call" } ]
288256488
import numpy as np
import regreg.api as rr
import pandas as pd

import selection.api as sel
from selection.tests.instance import gaussian_instance
from selection.algorithms.lasso import lasso
import selection.tests.reports as reports
from selection.tests.flags import SMALL_SAMPLES, SET_SEED
from selection.tests.decorators import wait_for_return_value, set_seed_iftrue, set_sampling_params_iftrue, register_report
from statsmodels.sandbox.stats.multicomp import multipletests
from selection.randomized.cv_view import CV_view
from scipy.stats import norm as ndist
from scipy.optimize import bisect
from selection.randomized.query import (naive_pvalues, naive_confidence_intervals)


def compute_projection_parameters(n, p, s, signal, rho, sigma, active):
    multiple = 10**2
    n_large = multiple*n
    X_large = np.zeros((n_large, p))
    y_large = np.zeros(n_large)
    for i in range(multiple):
        X_large[(i*n):((i+1)*n), :], y_large[(i*n):((i+1)*n)], _, _, _ = \
            gaussian_instance(n=n, p=p, s=s, signal=signal, rho=rho, sigma=sigma, scale=True, center=True)

    proj_param = np.linalg.lstsq(X_large[:, active], y_large)[0]
    print(proj_param)
    return proj_param


@register_report(['naive_pvalues', 'covered_naive', 'ci_length_naive', 'active_var'])
@set_seed_iftrue(SET_SEED)
@set_sampling_params_iftrue(SMALL_SAMPLES, burnin=10, ndraw=10)
@wait_for_return_value()
def test_naive(n=300,
               p=100,
               s=10,
               signal=3.5,
               rho=0.,
               sigma=1.,
               cross_validation=True,
               condition_on_CVR=False,
               lam_frac=1.,
               X=None,
               check_screen=False,
               check_projection_param=False,
               check_selected_param=True,
               intervals=False):
    print(n, p, s)

    if X is None:
        X, y, beta, truth, sigma = gaussian_instance(n=n, p=p, s=s, signal=signal, rho=rho, \
                                                     sigma=sigma, scale=True, center=True)
    else:
        beta = np.zeros(p)
        beta[:s] = signal
        y = X.dot(beta) + np.random.standard_normal(n)*sigma

    truth = np.nonzero(beta != 0)[0]

    if cross_validation:
        cv = CV_view(rr.glm.gaussian(X, y), loss_label="gaussian", lasso_randomization=None, epsilon=None,
                     scale1=None, scale2=None)
        cv.solve(glmnet=True)
        lam = cv.lam_CVR

        if condition_on_CVR:
            cv.condition_on_opt_state()
            lam = cv.one_SD_rule(direction="up")
    else:
        lam = lam_frac * np.fabs(X.T.dot(np.random.normal(1, 1. / 2, (n, 1000)))).max()

    L = lasso.gaussian(X, y, lam, sigma=sigma)
    soln = L.fit()

    active = soln != 0
    nactive = active.sum()
    print("nactive", nactive)
    if nactive == 0:
        return None

    active_signs = np.sign(soln[active])
    active_set = np.nonzero(active)[0]

    if (check_screen == False):
        if check_projection_param == True:
            true_vec = compute_projection_parameters(n, p, s, signal, rho, sigma, active)
        else:
            true_vec = signal*np.array([active_set[i] in truth for i in range(nactive)], int)
        print(true_vec)
    else:
        true_vec = beta[active]

    if (check_screen == False) or (set(truth).issubset(np.nonzero(active)[0])):

        print("active set", active_set)
        active_var = np.zeros(nactive, np.bool)

        naive_pvalues = np.zeros(nactive)
        naive_length = np.zeros(nactive)
        naive_covered = np.zeros(nactive)

        C = L.constraints
        if C is not None:
            one_step = L.onestep_estimator
            for i in range(one_step.shape[0]):
                eta = np.zeros_like(one_step)
                eta[i] = active_signs[i]
                alpha = 0.1

                def naive_inference():
                    obs = (eta * one_step).sum()
                    sd = np.sqrt(np.dot(eta.T, C.covariance.dot(eta)))
                    Z = obs / sd
                    # use Phi truncated to [-5,5]
                    _pval = ndist.cdf(Z)
                    _pval = 2 * min(_pval, 1 - _pval)
                    _interval = (obs - ndist.ppf(1 - alpha / 2) * sd,
                                 obs + ndist.ppf(1 - alpha / 2) * sd)
                    return _pval, _interval

                naive_pvalues[i], _naive_interval = naive_inference()

                def coverage(LU):
                    L, U = LU[0], LU[1]
                    _length = U - L
                    _covered = 0
                    if (L <= true_vec[i]) and (U >= true_vec[i]):
                        _covered = 1
                    return _covered, _length

                naive_covered[i], naive_length[i] = coverage(_naive_interval)
                active_var[i] = active_set[i] in truth
    else:
        return None

    print("naive pvalues", naive_pvalues)
    return naive_pvalues, naive_covered, naive_length, active_var


def report(niter=50, design="random", **kwargs):

    if design == "fixed":
        X, _, _, _, _ = gaussian_instance(**kwargs)
        kwargs.update({'X': X})

    kwargs.update({'cross_validation': True, 'condition_on_CVR': False})
    intervals_report = reports.reports['test_naive']
    screened_results = reports.collect_multiple_runs(intervals_report['test'],
                                                     intervals_report['columns'],
                                                     niter,
                                                     reports.summarize_all,
                                                     **kwargs)

    screened_results.to_pickle("naive.pkl")
    results = pd.read_pickle("naive.pkl")

    fig = reports.naive_pvalue_plot(results)
    #fig = reports.pvalue_plot(results, label="Naive p-values")
    fig.suptitle("Naive p-values", fontsize=20)
    fig.savefig('naive_pvalues.pdf')


if __name__ == '__main__':
    np.random.seed(500)
    kwargs = {'s': 0, 'n': 100, 'p': 50, 'signal': 3.5, 'sigma': 1, 'rho': 0., 'intervals': True}
    report(niter=100, **kwargs)
null
selection/randomized/tests/test_naive.py
test_naive.py
py
6,088
python
en
code
null
code-starcoder2
83
[ { "api_name": "numpy.zeros", "line_number": 19, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 20, "usage_type": "call" }, { "api_name": "selection.tests.instance.gaussian_instance", "line_number": 24, "usage_type": "call" }, { "api_name": "numpy.linalg.lstsq", "line_number": 26, "usage_type": "call" }, { "api_name": "numpy.linalg", "line_number": 26, "usage_type": "attribute" }, { "api_name": "selection.tests.instance.gaussian_instance", "line_number": 55, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 58, "usage_type": "call" }, { "api_name": "numpy.random.standard_normal", "line_number": 60, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 60, "usage_type": "attribute" }, { "api_name": "numpy.nonzero", "line_number": 62, "usage_type": "call" }, { "api_name": "selection.randomized.cv_view.CV_view", "line_number": 65, "usage_type": "call" }, { "api_name": "regreg.api.glm.gaussian", "line_number": 65, "usage_type": "call" }, { "api_name": "regreg.api.glm", "line_number": 65, "usage_type": "attribute" }, { "api_name": "regreg.api", "line_number": 65, "usage_type": "name" }, { "api_name": "numpy.fabs", "line_number": 75, "usage_type": "call" }, { "api_name": "numpy.random.normal", "line_number": 75, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 75, "usage_type": "attribute" }, { "api_name": "selection.algorithms.lasso.lasso.gaussian", "line_number": 77, "usage_type": "call" }, { "api_name": "selection.algorithms.lasso.lasso", "line_number": 77, "usage_type": "name" }, { "api_name": "numpy.sign", "line_number": 86, "usage_type": "call" }, { "api_name": "numpy.nonzero", "line_number": 87, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 94, "usage_type": "call" }, { "api_name": "numpy.nonzero", "line_number": 100, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 103, "usage_type": "call" }, { "api_name": "numpy.bool", "line_number": 103, "usage_type": "attribute" }, { "api_name": "selection.randomized.query.naive_pvalues", "line_number": 105, "usage_type": "name" }, { "api_name": "numpy.zeros", "line_number": 105, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 106, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 107, "usage_type": "call" }, { "api_name": "numpy.zeros_like", "line_number": 114, "usage_type": "call" }, { "api_name": "numpy.sqrt", "line_number": 120, "usage_type": "call" }, { "api_name": "numpy.dot", "line_number": 120, "usage_type": "call" }, { "api_name": "scipy.stats.norm.cdf", "line_number": 123, "usage_type": "call" }, { "api_name": "scipy.stats.norm", "line_number": 123, "usage_type": "name" }, { "api_name": "scipy.stats.norm.ppf", "line_number": 125, "usage_type": "call" }, { "api_name": "scipy.stats.norm", "line_number": 125, "usage_type": "name" }, { "api_name": "scipy.stats.norm.ppf", "line_number": 126, "usage_type": "call" }, { "api_name": "scipy.stats.norm", "line_number": 126, "usage_type": "name" }, { "api_name": "selection.randomized.query.naive_pvalues", "line_number": 129, "usage_type": "name" }, { "api_name": "selection.randomized.query.naive_pvalues", "line_number": 144, "usage_type": "argument" }, { "api_name": "selection.randomized.query.naive_pvalues", "line_number": 146, "usage_type": "name" }, { "api_name": "selection.tests.decorators.register_report", "line_number": 33, "usage_type": "call" }, { "api_name": "selection.tests.decorators.set_seed_iftrue", "line_number": 34, "usage_type": "call" 
}, { "api_name": "selection.tests.flags.SET_SEED", "line_number": 34, "usage_type": "argument" }, { "api_name": "selection.tests.decorators.set_sampling_params_iftrue", "line_number": 35, "usage_type": "call" }, { "api_name": "selection.tests.flags.SMALL_SAMPLES", "line_number": 35, "usage_type": "argument" }, { "api_name": "selection.tests.decorators.wait_for_return_value", "line_number": 36, "usage_type": "call" }, { "api_name": "selection.tests.instance.gaussian_instance", "line_number": 152, "usage_type": "call" }, { "api_name": "selection.tests.reports.reports", "line_number": 156, "usage_type": "attribute" }, { "api_name": "selection.tests.reports", "line_number": 156, "usage_type": "name" }, { "api_name": "selection.tests.reports.collect_multiple_runs", "line_number": 157, "usage_type": "call" }, { "api_name": "selection.tests.reports", "line_number": 157, "usage_type": "name" }, { "api_name": "selection.tests.reports.summarize_all", "line_number": 160, "usage_type": "attribute" }, { "api_name": "selection.tests.reports", "line_number": 160, "usage_type": "name" }, { "api_name": "pandas.read_pickle", "line_number": 164, "usage_type": "call" }, { "api_name": "selection.tests.reports.naive_pvalue_plot", "line_number": 166, "usage_type": "call" }, { "api_name": "selection.tests.reports", "line_number": 166, "usage_type": "name" }, { "api_name": "numpy.random.seed", "line_number": 174, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 174, "usage_type": "attribute" } ]
149849138
import numpy
from datetime import datetime
import random
from scipy.spatial import distance as scipy_dist


class HierarchicalClustering():
    """This class will create an object which has the necessary attributes and functions
    to perform bottom-up agglomerative hierarchical clustering on a set of provided ligand objects."""

    def __init__(self, distance_paradigm, cluster_number):
        self.dist_prdgm = distance_paradigm
        self.dendogram = []
        self.cluster_number = cluster_number

    def update_cluster(self, cluster_number):
        """This function resets the number of clusters to find in the algorithm to the number given by the user here"""
        self.cluster_number = cluster_number

    def get_data(self, objects_to_cluster):
        """This function takes the list of ligand objects to cluster given by the user and assigns
        them to the internal variable to_cluster. This function also uses the given ligands to
        populate the expanded data_expanded matrix"""
        self.to_cluster = objects_to_cluster
        self.data_expanded = numpy.zeros((len(self.to_cluster), 1024))
        i = 0
        for x in self.to_cluster:
            self.data_expanded[i, x.OnBits] = 1
            i = i + 1

    def calculate_distances(self):
        """This function is called internally when the cluster function is called by the user.
        It calls the distance function for each ligand pair and populates the results into the
        all_distances internal attribute."""
        num_data = int(len(self.to_cluster))
        print(num_data)
        # 100 is a sentinel value larger than any expected fingerprint distance
        self.all_distances = numpy.full((num_data, num_data), 100)
        for i in range(num_data):
            for j in range(i + 1, num_data):
                self.all_distances[i, j] = self.distance(i, j)  # print(i, j)
            if i % 100 == 0:
                print("i is ", i, end='\r')

    def single_linkage(self, dendogram_level):
        """Function to determine the distance between clusters for single linkage.
        Returns the indices of the current dendogram level to be lumped together."""
        # get the list and number of unique clusters at this level of the dendogram
        unique_cluster_list = numpy.unique(self.dendogram[dendogram_level, :])
        num_unique_clusters = len(unique_cluster_list)
        for i in range(num_unique_clusters):
            for j in range(i + 1, num_unique_clusters):
                entries_i = numpy.where(self.dendogram[dendogram_level, :] == unique_cluster_list[i])[0]
                entries_j = numpy.where(self.dendogram[dendogram_level, :] == unique_cluster_list[j])[0]
                single_distance = 100
                for z in entries_i:
                    for zed in entries_j:
                        # only the upper triangle of all_distances is populated,
                        # so always index it with the smaller ligand index first
                        single_temp_distance = self.all_distances[min(z, zed), max(z, zed)]
                        if single_temp_distance < single_distance:
                            single_distance = single_temp_distance
                self.temp_distances[i, j] = single_distance
        smallest_temp_distance = numpy.amin(self.temp_distances)
        first_cluster_index = numpy.where(self.temp_distances == smallest_temp_distance)[0][0]
        second_cluster_index = numpy.where(self.temp_distances == smallest_temp_distance)[1][0]
        return numpy.hstack((numpy.where(self.dendogram[dendogram_level, :] == unique_cluster_list[first_cluster_index])[0],
                             numpy.where(self.dendogram[dendogram_level, :] == unique_cluster_list[second_cluster_index])[0]))

    def distance(self, object_1_index, object_2_index):
        """Function to determine the distance between two ligands; uses the Euclidean distance
        between the binary fingerprints."""
        # size of the intersection of the two sparse on-bit index arrays
        n_common = numpy.intersect1d(self.to_cluster[object_1_index].OnBits,
                                     self.to_cluster[object_2_index].OnBits).size
        total_distance = len(self.to_cluster[object_1_index].OnBits) + len(self.to_cluster[object_2_index].OnBits) - 2 * n_common
        euclid_dist = numpy.sqrt(total_distance)
        # print(euclid_dist)
        return euclid_dist

    def cluster(self):
        """Function to determine clusters"""
        self.calculate_distances()
        print('Distances_Done')
        self.dendogram = numpy.zeros((len(self.to_cluster), len(self.to_cluster)))
        self.dendogram[0, :] = numpy.arange(len(self.to_cluster))  # every object gets its own cluster number
        # goes through the agglomeration procedure
        for zed in range(1, len(self.to_cluster) - self.cluster_number + 1):
            # square matrix to find the distance between all existing clusters
            self.temp_distances = numpy.full((len(self.to_cluster), len(self.to_cluster)), 100)
            # return an array of indices in the dendogram to lump together
            indices_to_lump = self.single_linkage(zed - 1)
            lowest_previous_cluster = numpy.amin(self.dendogram[zed - 1, indices_to_lump])
            self.dendogram[zed, :] = self.dendogram[zed - 1, :]
            self.dendogram[zed, indices_to_lump] = lowest_previous_cluster
            now = datetime.now()
            current_time = now.strftime("%H:%M:%S")
            print("Current Time 2 =", current_time, "zed is ", zed, end='\r')


class PartitionClustering():
    """This class implements vanilla K-means clustering"""

    def __init__(self, cluster_number, OnBit_Dim):
        """Two parameters must be passed upon class initialization
        cluster_number: int, determines the number of clusters to find
        OnBit_Dim: int, indicates the dimension of the OnBit space so bit vectors can be appropriately unpacked
        """
        self.cluster_number = cluster_number
        self.Bit_Num = OnBit_Dim

    def get_data(self, objects_to_cluster):
        """This function reads in the ligand objects and sets up the essential class attributes
        to be used in clustering"""
        self.to_cluster = objects_to_cluster
        self.total_objects = len(self.to_cluster)
        self.cluster_assignments = numpy.zeros((self.total_objects, 1))
        self.data_expanded = numpy.zeros((self.total_objects, self.Bit_Num))
        i = 0
        for x in self.to_cluster:
            self.data_expanded[i, x.OnBits] = 1
            i = i + 1
        self.create_similarity_matrix()
        print('Data Imported')

    def create_similarity_matrix(self):
        """This function goes through each pair of ligands in to_cluster, calculates their
        similarity by the Tanimoto coefficient and stores it in the internal similarity matrix attribute."""
        self.similarity_matrix = numpy.zeros((self.total_objects, self.total_objects))
        for i in range(self.total_objects):
            for j in range(self.total_objects):
                self.similarity_matrix[i, j] = self.tanimoto_coeff(self.to_cluster[i], self.to_cluster[j])
            print('i is', i, end='\r')

    def tanimoto_coeff(self, object_1, object_2):
        """This function determines the similarity of two ligand objects by calculating
        their Tanimoto coefficient."""
        # size of the intersection of the two sparse on-bit index arrays
        intersection = numpy.intersect1d(object_1.OnBits, object_2.OnBits).size
        union = len(object_1.OnBits) + len(object_2.OnBits) - intersection
        return intersection / union

    def distance(self, object_1, object_2):
        """Function to determine the distance between two ligands; uses the Euclidean distance.
        Expects 1D arrays of numerical values."""
        total_distance = 0
        obj_dimension = object_1.shape[0]
        for i in range(obj_dimension):
            total_distance = total_distance + (object_1[i] - object_2[i]) ** 2
        euclid_dist = numpy.sqrt(total_distance)
        return euclid_dist

    def initialize_centroids(self):
        """This function initializes the number of centroids indicated by cluster_number
        as random ligands in the similarity space"""
        self.centroids = numpy.zeros((self.cluster_number, self.total_objects))
        # used to make sure centroids aren't initialized to the same ligand
        available_indices = list(range(self.total_objects))
        for i in range(self.cluster_number):
            rand_location = random.randrange(len(available_indices))
            self.centroids[i, :] = self.similarity_matrix[available_indices[rand_location], :]
            available_indices.remove(available_indices[rand_location])

    def update_centroids(self):
        """This function finds the average location of the ligands assigned to a given cluster
        and moves the centroid to that location. If a cluster is empty, its centroid is
        reassigned to a random ligand in the data set."""
        for i in range(self.cluster_number):
            cluster_data_expanded = self.similarity_matrix[numpy.where(self.cluster_assignments == i)[0], :]
            self.centroids[i, :] = cluster_data_expanded.mean(axis=0)
            if numpy.isnan(self.centroids[i, :]).any():
                rand_location = random.randrange(self.total_objects)
                self.centroids[i, :] = self.similarity_matrix[rand_location, :]
                print("Na fixed")

    def assign_cluster(self):
        """This function finds the closest centroid to each ligand and assigns that centroid's
        number as the cluster for that ligand"""
        temp_center_distances = numpy.zeros((self.total_objects, self.cluster_number))
        for i in range(self.total_objects):
            for j in range(self.cluster_number):
                temp_center_distances[i, j] = self.distance(self.similarity_matrix[i, :], self.centroids[j, :])
            self.cluster_assignments[i] = numpy.argmin(temp_center_distances[i, :])
        unique, counts = numpy.unique(self.cluster_assignments, return_counts=True)
        print(dict(zip(unique, counts)))

    def update_cluster_number(self, cluster_number):
        """This function updates the number of clusters to find"""
        self.cluster_number = cluster_number

    def is_cluster_over(self):
        """This function determines whether to stop the clustering procedure. If the current
        centroids are the same as either the previous set of centroids or the set from two
        iterations back, clustering stops."""
        if numpy.array_equal(self.centroids, self.centroids_1back) or numpy.array_equal(self.centroids, self.centroids_2back):
            self.keep_clustering = False

    def cluster(self):
        """This function performs all the necessary steps to execute k-means clustering"""
        self.initialize_centroids()
        self.keep_clustering = True
        self.centroids_1back = numpy.zeros((self.cluster_number, self.total_objects))
        self.centroids_2back = numpy.zeros((self.cluster_number, self.total_objects))
        i = 1
        while self.keep_clustering:
            self.assign_cluster()
            self.centroids_2back = numpy.copy(self.centroids_1back)
            self.centroids_1back = numpy.copy(self.centroids)
            self.update_centroids()
            self.is_cluster_over()
            print('Iterations Completed:', i, end='\r')
            i = i + 1


class ligand():
    """This simple class organizes all the important attributes of a ligand into a single
    object, with each held as an attribute"""

    def __init__(self, ligand_string):
        first_comma = ligand_string.find(",")
        second_comma = ligand_string.find(",", first_comma + 1)
        third_comma = ligand_string.find(",", second_comma + 1)
        final_n = ligand_string.find("\n", second_comma + 1)
        self.ID = ligand_string[0:first_comma]
        self.score = float(ligand_string[first_comma + 1:second_comma])
        self.smiles = ligand_string[second_comma + 1:third_comma]
        onBits = ligand_string[third_comma + 2:final_n - 1]
        self.OnBits = numpy.array([int(i) for i in onBits.split(',')])


# below are functions that are useful for evaluating clusters

def silhouette_score(distance_matrix, cluster_list, POI):
    """This defines the silhouette score for a given point"""
    a_temp = 0
    b_temp = 0
    score = 0
    # first find the closest cluster
    close_cluster = closest_cluster(distance_matrix, cluster_list, POI)
    # get the ligand indices for the ligands in the same cluster and in the closest cluster
    parent_cluster_indices = numpy.where(cluster_list == cluster_list[POI])[0]
    close_cluster_indices = numpy.where(cluster_list == close_cluster)[0]
    num_parent_cluster = len(parent_cluster_indices)
    num_close_cluster = len(close_cluster_indices)
    # find the average distance to the other ligands in the same cluster
    for i in range(num_parent_cluster):
        if parent_cluster_indices[i] != POI:
            a_temp = a_temp + distance_matrix[parent_cluster_indices[i], POI]
    if num_parent_cluster > 1:
        a_final = a_temp / (num_parent_cluster - 1)
    else:
        # if the cluster is a single ligand, define a as 0
        a_final = 0
    # find the average distance to all the ligands in the closest cluster
    for j in range(num_close_cluster):
        b_temp = b_temp + distance_matrix[close_cluster_indices[j], POI]
    b_final = b_temp / num_close_cluster
    score = (b_final - a_final) / max(a_final, b_final)
    return score


def Jaccard_Index(cluster_list_1, cluster_list_2):
    """Compares the similarity of two clustering results by calculating the Jaccard index"""
    num_ligand = len(cluster_list_1)
    f_11 = 0  # same in both
    f_01 = 0  # same in 1, not in 2
    f_10 = 0  # same in 2, not in 1
    f_00 = 0  # diff in both
    for i in range(0, num_ligand):
        for j in range(i + 1, num_ligand):
            ligand_1_clust_1 = cluster_list_1[i]
            ligand_1_clust_2 = cluster_list_2[i]
            ligand_2_clust_1 = cluster_list_1[j]
            ligand_2_clust_2 = cluster_list_2[j]
            if ligand_1_clust_1 == ligand_2_clust_1 and ligand_1_clust_2 == ligand_2_clust_2:
                f_11 = f_11 + 1
            if ligand_1_clust_1 == ligand_2_clust_1 and ligand_1_clust_2 != ligand_2_clust_2:
                f_01 = f_01 + 1
            if ligand_1_clust_1 != ligand_2_clust_1 and ligand_1_clust_2 == ligand_2_clust_2:
                f_10 = f_10 + 1
            if ligand_1_clust_1 != ligand_2_clust_1 and ligand_1_clust_2 != ligand_2_clust_2:
                f_00 = f_00 + 1
    print(f_11, f_01, f_10, f_00)
    jaccard_index = f_11 / (f_01 + f_10 + f_11)
    return jaccard_index


def euclid_distance(similarity_matrix):
    """This function takes a similarity matrix and creates a Euclidean distance matrix based upon it"""
    obj_dimension = similarity_matrix.shape[0]
    print(obj_dimension)
    distance_matrix = numpy.zeros((obj_dimension, obj_dimension))
    for m in range(obj_dimension):
        for n in range(obj_dimension):
            distance_matrix[m, n] = scipy_dist.euclidean(similarity_matrix[m, :], similarity_matrix[n, :])
        print(m, end='\r')
    return distance_matrix


def closest_cluster(distance_matrix, cluster_list, ligand_index):
    """This function finds the cluster that is closest to the given ligand and is not the
    ligand's home cluster"""
    num_ligands = distance_matrix.shape[0]
    current_cluster = cluster_list[ligand_index]
    # similarity scores are bounded at 1, so the max Euclidean distance is sqrt(num_ligands);
    # num_ligands therefore always exceeds any real distance
    closest_distance = num_ligands
    close_cluster = -1  # no cluster is ever called -1, so returning -1 indicates an error
    for i in range(num_ligands):
        temp_distance = distance_matrix[ligand_index, i]
        if temp_distance < closest_distance and cluster_list[i] != current_cluster:
            closest_distance = temp_distance
            close_cluster = cluster_list[i]
    return close_cluster
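Before trusting either clustering class it is worth sanity-checking the sparse-fingerprint arithmetic they share: the distance |A| + |B| - 2|A∩B| (whose square root the distance() method returns) and the Tanimoto coefficient |A∩B| / |A∪B|, both computed over arrays of on-bit indices. The snippet below is an editorial check against dense bit vectors; it is not part of the original file.

import numpy

a = numpy.array([3, 17, 42, 512])   # on-bit indices of ligand A
b = numpy.array([3, 42, 99])        # on-bit indices of ligand B

n_common = numpy.intersect1d(a, b).size
hamming = len(a) + len(b) - 2 * n_common              # |A| + |B| - 2|A n B|
tanimoto = n_common / (len(a) + len(b) - n_common)    # |A n B| / |A u B|

# cross-check against dense 1024-bit vectors
dense_a = numpy.zeros(1024)
dense_a[a] = 1
dense_b = numpy.zeros(1024)
dense_b[b] = 1
assert hamming == int(numpy.sum(dense_a != dense_b))
print(hamming, numpy.sqrt(hamming), tanimoto)  # 3, 1.732..., 0.4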
null
clusters/algs.py
algs.py
py
15,992
python
en
code
null
code-starcoder2
83
[ { "api_name": "numpy.zeros", "line_number": 22, "usage_type": "call" }, { "api_name": "numpy.full", "line_number": 33, "usage_type": "call" }, { "api_name": "numpy.unique", "line_number": 45, "usage_type": "call" }, { "api_name": "numpy.where", "line_number": 52, "usage_type": "call" }, { "api_name": "numpy.where", "line_number": 53, "usage_type": "call" }, { "api_name": "numpy.amin", "line_number": 65, "usage_type": "call" }, { "api_name": "numpy.where", "line_number": 66, "usage_type": "call" }, { "api_name": "numpy.where", "line_number": 67, "usage_type": "call" }, { "api_name": "numpy.hstack", "line_number": 70, "usage_type": "call" }, { "api_name": "numpy.where", "line_number": 70, "usage_type": "call" }, { "api_name": "numpy.sum", "line_number": 75, "usage_type": "call" }, { "api_name": "numpy.sqrt", "line_number": 76, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 84, "usage_type": "call" }, { "api_name": "numpy.arange", "line_number": 85, "usage_type": "call" }, { "api_name": "numpy.full", "line_number": 90, "usage_type": "call" }, { "api_name": "numpy.amin", "line_number": 93, "usage_type": "call" }, { "api_name": "datetime.datetime.now", "line_number": 98, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 98, "usage_type": "name" }, { "api_name": "numpy.zeros", "line_number": 123, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 124, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 136, "usage_type": "call" }, { "api_name": "numpy.sum", "line_number": 144, "usage_type": "call" }, { "api_name": "numpy.sum", "line_number": 145, "usage_type": "call" }, { "api_name": "numpy.sqrt", "line_number": 155, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 160, "usage_type": "call" }, { "api_name": "random.randrange", "line_number": 163, "usage_type": "call" }, { "api_name": "numpy.where", "line_number": 171, "usage_type": "call" }, { "api_name": "numpy.isnan", "line_number": 173, "usage_type": "call" }, { "api_name": "random.randrange", "line_number": 174, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 180, "usage_type": "call" }, { "api_name": "numpy.argmin", "line_number": 184, "usage_type": "call" }, { "api_name": "numpy.unique", "line_number": 185, "usage_type": "call" }, { "api_name": "numpy.array_equal", "line_number": 198, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 206, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 207, "usage_type": "call" }, { "api_name": "numpy.copy", "line_number": 211, "usage_type": "call" }, { "api_name": "numpy.copy", "line_number": 212, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 235, "usage_type": "call" }, { "api_name": "numpy.where", "line_number": 247, "usage_type": "call" }, { "api_name": "numpy.where", "line_number": 248, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 308, "usage_type": "call" }, { "api_name": "scipy.spatial.distance.euclidean", "line_number": 311, "usage_type": "call" }, { "api_name": "scipy.spatial.distance", "line_number": 311, "usage_type": "name" } ]
327779012
from flask import Blueprint, request, jsonify
from app import db
from dotenv import load_dotenv
from app.models.card import Card
from app.models.board import Board

load_dotenv()

cards_bp = Blueprint('cards', __name__)
boards_bp = Blueprint('boards', __name__)


@boards_bp.route('/')
def root():
    return ('''<h1>Mango Mania</h1>''')


@boards_bp.route('/boards', methods=["GET", "POST"], strict_slashes=False)
def handle_boards():
    if request.method == "GET":
        boards = Board.query.all()
        boards_response = []
        for board in boards:
            boards_response.append({
                "board_id": board.board_id,
                "title": board.title,
                "owner": board.owner,
            })
        return jsonify(boards_response)

    elif request.method == "POST":
        request_body = request.get_json()
        title = request_body.get("title")
        owner = request_body.get("owner")
        if "title" not in request_body or "owner" not in request_body:
            return jsonify({"details": "Invalid data"}), 400
        new_board = Board(title=title, owner=owner)
        db.session.add(new_board)
        db.session.commit()
        commited_board = {"board": {"board_id": new_board.board_id,
                                    "title": new_board.title,
                                    "owner": new_board.owner}}
        return jsonify(commited_board), 201


@boards_bp.route("/boards/<board_id>", methods=["GET", "DELETE"])
def handle_board(board_id):
    board = Board.query.get_or_404(board_id)
    if request.method == "GET":
        selected_board = {"board": {"board_id": board.board_id,
                                    "title": board.title,
                                    "owner": board.owner}}
        return jsonify(selected_board), 200
    elif request.method == "DELETE":
        db.session.delete(board)
        db.session.commit()
        board_response_body = {"details": f'Board number {board.board_id} "{board.title}" successfully deleted'}
        return jsonify(board_response_body), 200


@cards_bp.route("/boards/<board_id>/cards", methods=["GET", "POST"])
def handle_cards(board_id):
    # get_or_404 keeps a missing board from raising an AttributeError below
    board = Board.query.get_or_404(board_id)
    if request.method == "GET":
        cards = board.cards
        cards_response = []
        for card in cards:
            cards_response.append({
                "card_id": card.card_id,
                "message": card.message,
                "votes": card.like_count,
            })
        return jsonify(cards_response)

    elif request.method == "POST":
        request_body = request.get_json()
        if "message" not in request_body:
            return jsonify({"details": "Invalid data"}), 400
        new_card = Card(message=request_body["message"], like_count=0, board_id=board.board_id)
        db.session.add(new_card)
        db.session.commit()
        commited_card = {"card": {
            "card_id": new_card.card_id,
            "message": new_card.message,
            "votes": new_card.like_count,
            "board_id": new_card.board_id
        }}
        return jsonify(commited_card), 201


@cards_bp.route("/<card_id>/votes", methods=["PATCH"])
def handle_card_like(card_id):
    card = Card.query.get_or_404(card_id)
    # reject a missing or non-numeric like_count instead of raising a 500
    vote = request.args.get("like_count", type=int)
    if vote is None:
        return jsonify({"details": "Invalid data"}), 400
    card.like_count += vote
    db.session.commit()
    response_body = {
        "card": {
            "card_id": card.card_id,
            "message": card.message,
            "votes": card.like_count,
        }
    }
    return jsonify(response_body), 200


@cards_bp.route("/<card_id>", methods=["DELETE"])
def handle_card_del(card_id):
    card = Card.query.get_or_404(card_id)
    db.session.delete(card)
    db.session.commit()
    cards_response_body = {"details": f'card {card.card_id} "{card.message}" successfully deleted'}
    return jsonify(cards_response_body), 200
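A quick way to exercise these routes end to end is a small client-side smoke test. The snippet below is an editorial sketch, not part of the record: it assumes the app is being served at http://localhost:5000 and that both blueprints are registered without extra URL prefixes (the app factory is not shown in this record).

import requests

BASE = "http://localhost:5000"

# create a board, then a card on that board
board = requests.post(f"{BASE}/boards", json={"title": "Demo", "owner": "js"}).json()["board"]
board_id = board["board_id"]

card = requests.post(f"{BASE}/boards/{board_id}/cards", json={"message": "hello"}).json()["card"]
card_id = card["card_id"]

# the PATCH route reads the increment from the like_count query parameter
patched = requests.patch(f"{BASE}/{card_id}/votes", params={"like_count": 1}).json()
print(patched["card"]["votes"])  # expect 1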
null
app/routes.py
routes.py
py
3,857
python
en
code
null
code-starcoder2
83
[ { "api_name": "dotenv.load_dotenv", "line_number": 7, "usage_type": "call" }, { "api_name": "flask.Blueprint", "line_number": 9, "usage_type": "call" }, { "api_name": "flask.Blueprint", "line_number": 10, "usage_type": "call" }, { "api_name": "flask.request.method", "line_number": 18, "usage_type": "attribute" }, { "api_name": "flask.request", "line_number": 18, "usage_type": "name" }, { "api_name": "app.models.board.Board.query.all", "line_number": 19, "usage_type": "call" }, { "api_name": "app.models.board.Board.query", "line_number": 19, "usage_type": "attribute" }, { "api_name": "app.models.board.Board", "line_number": 19, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 28, "usage_type": "call" }, { "api_name": "flask.request.method", "line_number": 30, "usage_type": "attribute" }, { "api_name": "flask.request", "line_number": 30, "usage_type": "name" }, { "api_name": "flask.request.get_json", "line_number": 31, "usage_type": "call" }, { "api_name": "flask.request", "line_number": 31, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 35, "usage_type": "call" }, { "api_name": "app.models.board.Board", "line_number": 36, "usage_type": "call" }, { "api_name": "app.db.session.add", "line_number": 38, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 38, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 38, "usage_type": "name" }, { "api_name": "app.db.session.commit", "line_number": 39, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 39, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 39, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 45, "usage_type": "call" }, { "api_name": "app.models.board.Board.query.get_or_404", "line_number": 49, "usage_type": "call" }, { "api_name": "app.models.board.Board.query", "line_number": 49, "usage_type": "attribute" }, { "api_name": "app.models.board.Board", "line_number": 49, "usage_type": "name" }, { "api_name": "flask.request.method", "line_number": 50, "usage_type": "attribute" }, { "api_name": "flask.request", "line_number": 50, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 56, "usage_type": "call" }, { "api_name": "flask.request.method", "line_number": 57, "usage_type": "attribute" }, { "api_name": "flask.request", "line_number": 57, "usage_type": "name" }, { "api_name": "app.db.session.delete", "line_number": 58, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 58, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 58, "usage_type": "name" }, { "api_name": "app.db.session.commit", "line_number": 59, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 59, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 59, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 61, "usage_type": "call" }, { "api_name": "app.models.board.Board.query.get", "line_number": 65, "usage_type": "call" }, { "api_name": "app.models.board.Board.query", "line_number": 65, "usage_type": "attribute" }, { "api_name": "app.models.board.Board", "line_number": 65, "usage_type": "name" }, { "api_name": "flask.request.method", "line_number": 67, "usage_type": "attribute" }, { "api_name": "flask.request", "line_number": 67, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 76, "usage_type": "call" }, { "api_name": "flask.request.method", "line_number": 78, "usage_type": "attribute" }, { "api_name": 
"flask.request", "line_number": 78, "usage_type": "name" }, { "api_name": "flask.request.get_json", "line_number": 79, "usage_type": "call" }, { "api_name": "flask.request", "line_number": 79, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 82, "usage_type": "call" }, { "api_name": "app.models.card.Card", "line_number": 84, "usage_type": "call" }, { "api_name": "app.db.session.add", "line_number": 85, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 85, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 85, "usage_type": "name" }, { "api_name": "app.db.session.commit", "line_number": 86, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 86, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 86, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 93, "usage_type": "call" }, { "api_name": "app.models.card.Card.query.get_or_404", "line_number": 97, "usage_type": "call" }, { "api_name": "app.models.card.Card.query", "line_number": 97, "usage_type": "attribute" }, { "api_name": "app.models.card.Card", "line_number": 97, "usage_type": "name" }, { "api_name": "flask.request.args.get", "line_number": 98, "usage_type": "call" }, { "api_name": "flask.request.args", "line_number": 98, "usage_type": "attribute" }, { "api_name": "flask.request", "line_number": 98, "usage_type": "name" }, { "api_name": "app.db.session.commit", "line_number": 101, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 101, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 101, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 109, "usage_type": "call" }, { "api_name": "app.models.card.Card.query.get_or_404", "line_number": 113, "usage_type": "call" }, { "api_name": "app.models.card.Card.query", "line_number": 113, "usage_type": "attribute" }, { "api_name": "app.models.card.Card", "line_number": 113, "usage_type": "name" }, { "api_name": "app.db.session.delete", "line_number": 114, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 114, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 114, "usage_type": "name" }, { "api_name": "app.db.session.commit", "line_number": 115, "usage_type": "call" }, { "api_name": "app.db.session", "line_number": 115, "usage_type": "attribute" }, { "api_name": "app.db", "line_number": 115, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 117, "usage_type": "call" } ]
392552580
from django.utils.translation import gettext as _
from rest_framework.viewsets import ModelViewSet

from api import permissions


class BaseModelViewSet(ModelViewSet):
    http_method_names = ('get', 'post', 'put', 'delete')
    lookup_field = 'id'
    authentication_required = True

    default_serializer_class = None
    action_serializers = dict()

    default_permission_classes = tuple()
    action_permissions = dict()

    list_filters = dict()

    def get_list_filters(self, *args, **kwargs) -> dict:
        # accept and ignore the view args passed through from get_queryset
        result = dict()
        for url_filter_key, query_filter_key in self.list_filters.items():
            filter_val = self.request.GET.get(url_filter_key, None)
            if filter_val:
                result[query_filter_key] = filter_val
        return result

    def get_serializer_class(self, *args, **kwargs):
        serializer = self.action_serializers.get(
            self.action,
            self.default_serializer_class
        )
        if not serializer:
            err = _(f'error getting serializer class for "{self.action}" action.')
            err += _('\n"default_serializer_class" also set to None')
            raise Exception(err)
        return serializer

    def get_permissions(self) -> tuple:
        result_permissions = list()
        if self.authentication_required:
            result_permissions.append(permissions.IsLoggedIn)
        if self.default_permission_classes:
            result_permissions = list(self.default_permission_classes)
        action_permissions = self.action_permissions.get(self.action, None)
        if action_permissions:
            result_permissions = list(action_permissions)
        return tuple([p() for p in result_permissions])

    def get_queryset(self, *args, **kwargs):
        ordering = self.request.GET.get('ordering', None)
        if ordering:
            queryset = (
                self.get_serializer()
                .Meta.model.objects
                .filter(**self.get_list_filters(*args, **kwargs))
                .order_by(*ordering.split(','))
            )
        else:
            queryset = (
                self.get_serializer()
                .Meta.model.objects
                .filter(**self.get_list_filters(*args, **kwargs))
            )
        return queryset
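For context, here is what a concrete view built on this base class could look like. The subclass below is an editorial sketch: BaseModelViewSet and permissions.IsLoggedIn come from the record (sub_path api/base_viewset.py), while BookSerializer, BookDetailSerializer and the books.serializers module are hypothetical stand-ins for whatever the real project defines, so this only runs inside such a Django project.

from api.base_viewset import BaseModelViewSet
from api import permissions
from books.serializers import BookSerializer, BookDetailSerializer  # hypothetical module


class BookViewSet(BaseModelViewSet):
    # fallback used by get_serializer_class() for any action not listed below
    default_serializer_class = BookSerializer
    # per-action serializer override, resolved by get_serializer_class()
    action_serializers = {'retrieve': BookDetailSerializer}
    # per-action permission override; replaces the defaults entirely
    action_permissions = {'destroy': (permissions.IsLoggedIn,)}
    # maps the ?author=<x> query parameter onto an author__username=<x> filter
    list_filters = {'author': 'author__username'}

With this in place, a list request such as GET /books/?author=alice&ordering=-id is filtered and ordered by get_queryset() with no extra code in the subclass.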
null
api/base_viewset.py
base_viewset.py
py
2,217
python
en
code
null
code-starcoder2
83
[ { "api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 4, "usage_type": "name" }, { "api_name": "api.permissions.IsLoggedIn", "line_number": 37, "usage_type": "attribute" }, { "api_name": "api.permissions", "line_number": 37, "usage_type": "name" } ]
277816006
"""PASCAL DOTA dataset.""" import os import xml.etree.ElementTree import tensorflow as tf import tensorflow_datasets.public_api as tfds _VOC_CITATION = """\ @misc{ding2021object, title={Object Detection in Aerial Images: A Large-Scale Benchmark and Challenges}, author={Jian Ding and Nan Xue and Gui-Song Xia and Xiang Bai and Wen Yang and Micheal Ying Yang and Serge Belongie and Jiebo Luo and Mihai Datcu and Marcello Pelillo and Liangpei Zhang}, year={2021}, eprint={2102.12219}, archivePrefix={arXiv}, primaryClass={cs.CV} } """ _VOC_DESCRIPTION = """ This dataset contains the data from the PASCAL Visual Object Classes Challenge, corresponding to the Classification and Detection competitions. In the Classification competition, the goal is to predict the set of labels contained in the image, while in the Detection competition the goal is to predict the bounding box and label of each individual object. WARNING: As per the official dataset, the test set of VOC2012 does not contain annotations. """ _VOC_CONFIG_DESCRIPTION = """ Created by Crowley """ _VOC_URL = "Oh Mr.Crowley" # Original site, it is down very often. # _VOC_DATA_URL = "http://host.robots.ox.ac.uk/pascal/VOC/voc{year}/" # Data mirror: _VOC_LABELS = ( "sv", "lv", "boat", "plane", ) def _get_example_objects(annon_filepath): """Function to get all the objects from the annotation XML file.""" with tf.io.gfile.GFile(annon_filepath, "r") as f: root = xml.etree.ElementTree.parse(f).getroot() # Disable pytype to avoid attribute-error due to find returning # Optional[Element] # pytype: disable=attribute-error size = root.find("size") width = float(size.find("width").text) height = float(size.find("height").text) for obj in root.findall("object"): # Get object's label name. label = obj.find("name").text.lower() # Get object's bounding box bndbox = obj.find("bndbox") xmax = float(bndbox.find("xmax").text) xmin = float(bndbox.find("xmin").text) ymax = float(bndbox.find("ymax").text) ymin = float(bndbox.find("ymin").text) yield { "label": label, "bbox": tfds.features.BBox( ymin / height, xmin / width, ymax / height, xmax / width), } class Dota_VocConfig(tfds.core.BuilderConfig): """BuilderConfig for Voc.""" def __init__(self, year=None, filenames=None, has_test_annotations=True, **kwargs): self.year = '2020' self.filenames = filenames self.has_test_annotations = has_test_annotations super(Dota_VocConfig, self).__init__( name='DOTA', version=tfds.core.Version("1.0.0"), **kwargs) class Dota_Voc(tfds.core.GeneratorBasedBuilder): """DOTA dataset.""" MANUAL_DOWNLOAD_INSTRUCTIONS = """ Register into https://example.org/login to get the data. Place the `data.zip` file in the `manual_dir/`. 
""" BUILDER_CONFIGS = [ Dota_VocConfig( description=_VOC_CONFIG_DESCRIPTION.format( num_images=4771, num_objects=24640), filenames={ "dota_train": "dota_train.zip", "dota_test": "dota_test.zip", }, ), ] def _info(self): return tfds.core.DatasetInfo( builder=self, description=_VOC_DESCRIPTION, features=tfds.features.FeaturesDict({ "image": tfds.features.Image(), "image/filename": tfds.features.Text(), "objects": tfds.features.Sequence({ "label": tfds.features.ClassLabel(names=_VOC_LABELS), "bbox": tfds.features.BBoxFeature(), }), "labels": tfds.features.Sequence( tfds.features.ClassLabel(names=_VOC_LABELS)), }), # homepage=_VOC_URL.format(year=self.builder_config.year), # citation=_VOC_CITATION.format(year=self.builder_config.year), ) def _split_generators(self, dl_manager): archive_path = dl_manager.manual_dir / "data.tar" extracted_path = dl_manager.extract(archive_path) return{ 'train' : self._generate_examples( images_path=extracted_path / 'train_imgs', label_path=extracted_path / 'train_labels', ), 'test' : self._generate_examples( images_path=extracted_path / 'test_imgs', labels_path=extracted_path / 'test_labels', ), } def _generate_examples(self, images_path, labels_path): for labels_file in tf.io.gfile.listdir(labels_path): image_id = labels_file[:-4] image_path = os.path.join(images_path, image_id + '.jpg') label_path = os.path.join(labels_path, labels_file) example = self._generate_example(image_id, image_path, label_path) yield image_id, example def _generate_example(self, image_id, image_filepath, annon_filepath): objects = list(_get_example_objects(annon_filepath)) labels = sorted(set(obj["label"] for obj in objects)) return { "image": image_filepath, "image/filename": image_id + ".jpg", "objects": objects, "labels": labels, }
null
dota_voc.py
dota_voc.py
py
5,371
python
en
code
null
code-starcoder2
83
[ { "api_name": "tensorflow.io.gfile.GFile", "line_number": 49, "usage_type": "call" }, { "api_name": "tensorflow.io", "line_number": 49, "usage_type": "attribute" }, { "api_name": "xml.etree.ElementTree.etree.ElementTree.parse", "line_number": 50, "usage_type": "call" }, { "api_name": "xml.etree.ElementTree.etree", "line_number": 50, "usage_type": "attribute" }, { "api_name": "xml.etree.ElementTree", "line_number": 50, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.features.BBox", "line_number": 70, "usage_type": "call" }, { "api_name": "tensorflow_datasets.public_api.features", "line_number": 70, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 70, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.core", "line_number": 75, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 75, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.core.Version", "line_number": 85, "usage_type": "call" }, { "api_name": "tensorflow_datasets.public_api.core", "line_number": 85, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 85, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.core", "line_number": 89, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 89, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.core.DatasetInfo", "line_number": 109, "usage_type": "call" }, { "api_name": "tensorflow_datasets.public_api.core", "line_number": 109, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 109, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.features.FeaturesDict", "line_number": 112, "usage_type": "call" }, { "api_name": "tensorflow_datasets.public_api.features", "line_number": 112, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 112, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.features.Image", "line_number": 113, "usage_type": "call" }, { "api_name": "tensorflow_datasets.public_api.features", "line_number": 113, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 113, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.features.Text", "line_number": 114, "usage_type": "call" }, { "api_name": "tensorflow_datasets.public_api.features", "line_number": 114, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 114, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.features.Sequence", "line_number": 115, "usage_type": "call" }, { "api_name": "tensorflow_datasets.public_api.features", "line_number": 115, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 115, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.features.ClassLabel", "line_number": 116, "usage_type": "call" }, { "api_name": "tensorflow_datasets.public_api.features", "line_number": 116, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 116, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.features.BBoxFeature", "line_number": 117, "usage_type": "call" }, { "api_name": "tensorflow_datasets.public_api.features", "line_number": 117, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 
117, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.features.Sequence", "line_number": 119, "usage_type": "call" }, { "api_name": "tensorflow_datasets.public_api.features", "line_number": 119, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 119, "usage_type": "name" }, { "api_name": "tensorflow_datasets.public_api.features.ClassLabel", "line_number": 120, "usage_type": "call" }, { "api_name": "tensorflow_datasets.public_api.features", "line_number": 120, "usage_type": "attribute" }, { "api_name": "tensorflow_datasets.public_api", "line_number": 120, "usage_type": "name" }, { "api_name": "tensorflow.io.gfile.listdir", "line_number": 144, "usage_type": "call" }, { "api_name": "tensorflow.io", "line_number": 144, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 147, "usage_type": "call" }, { "api_name": "os.path", "line_number": 147, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 148, "usage_type": "call" }, { "api_name": "os.path", "line_number": 148, "usage_type": "attribute" } ]
214823890
from __future__ import print_function, division

from argparse import ArgumentParser
import yaml
import logging
import copy
import sys
import os
import time
from subprocess import call

from marmot.experiment.import_utils import call_for_each_element, build_object, build_objects, mk_tmp_dir
from marmot.experiment.preprocessing_utils import create_contexts, flatten, contexts_to_features, tags_from_contexts, fit_binarizers, binarize
from marmot.experiment.learning_utils import map_classifiers, predict_all
from marmot.evaluation.evaluation_metrics import weighted_fmeasure, sequence_correlation, sequence_correlation_weighted
from marmot.evaluation.evaluation_utils import compare_vocabulary
from marmot.util.persist_features import persist_features
from marmot.util.generate_crf_template import generate_crf_template
from marmot.evaluation.evaluation_utils import write_res_to_file

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')

'''
Learn a model with an external CRF tool: CRF++ or CRFSuite
'''


def label_test(flat_labels, new_test_name, text_file, method_name):
    tag_map = {0: 'BAD', 1: 'OK'}
    new_test_plain = open(new_test_name + '.' + method_name + '.plain', 'w')
    new_test_ext = open(new_test_name + '.' + method_name + '.ext', 'w')

    start_idx = 0
    for s_idx, txt in enumerate(open(text_file)):
        words = txt[:-1].decode('utf-8').strip().split()
        # take the slice of flat_labels belonging to this sentence and advance
        # start_idx so the next sentence starts where this one ended
        tag_seq = [tag_map[flat_labels[i]] for i in range(start_idx, start_idx + len(words))]
        start_idx += len(words)
        new_test_plain.write('%s\n' % ' '.join(tag_seq))
        for t_idx, (tag, word) in enumerate(zip(tag_seq, words)):
            new_test_ext.write('%s\t%d\t%d\t%s\t%s\n' % (method_name, s_idx, t_idx, word.encode('utf-8'), tag))


def get_crfpp_output(out_file):
    predicted = []
    for line in open(out_file):
        line = line.strip('\n').replace('\t', ' ')
        predicted.append(line.split(' ')[-1])
    return predicted


def main(config):
    workers = config['workers']
    tmp_dir = config['tmp_dir'] if 'tmp_dir' in config else None
    tmp_dir = mk_tmp_dir(tmp_dir)
    time_stamp = str(time.time())

    # REPRESENTATION GENERATION
    # main representations (source, target, tags)
    # training
    train_data_generators = build_objects(config['datasets']['training'])
    train_data = {}
    for gen in train_data_generators:
        data = gen.generate()
        for key in data:
            if key not in train_data:
                train_data[key] = []
            train_data[key].extend(data[key])
    # test
    test_data_generator = build_object(config['datasets']['test'][0])
    test_data = test_data_generator.generate()

    logger.info("Train data keys: {}".format(train_data.keys()))
    logger.info("Train data sequences: {}".format(len(train_data['target'])))
    logger.info("Sample sequence: {}".format([w.encode('utf-8') for w in train_data['target'][0]]))
    # logger.info("Sample sequence: {}".format(train_data['similarity'][0]))
    # sys.exit()

    # additional representations
    if 'representations' in config:
        representation_generators = build_objects(config['representations'])
    else:
        representation_generators = []
    for r in representation_generators:
        train_data = r.generate(train_data)
        test_data = r.generate(test_data)

    # borders = config['borders'] if 'borders' in config else False
    # if 'multiply_data_train' not in config:
    #     pass
    # elif config['multiply_data_train'] == 'ngrams':
    #     train_data = multiply_data_ngrams(train_data, borders=borders)
    # elif config['multiply_data_train'] == '1ton':
    #     train_data = multiply_data(train_data, borders=borders)
    # elif config['multiply_data_train'] == 'duplicate':
    #     train_data = multiply_data_base(train_data)
    # elif config['multiply_data_train'] == 'all':
    #     train_data = multiply_data_all(train_data, borders=borders)
    # else:
    #     print("Unknown 'multiply data train' value: {}".format(config['multiply_data_train']))
    # logger.info("Extended train representations: {}".format(len(train_data['target'])))
    # logger.info("Simple test representations: {}".format(len(test_data['target'])))
    # if 'multiply_data_test' not in config:
    #     pass
    # elif config['multiply_data_test'] == 'ngrams':
    #     test_data = multiply_data_ngrams(test_data, borders=borders)
    # elif config['multiply_data_test'] == '1ton':
    #     test_data = multiply_data(test_data, borders=borders)
    # else:
    #     print("Unknown 'multiply data test' value: {}".format(config['multiply_data_test']))
    # logger.info("Extended test representations: {}".format(len(test_data['target'])))

    logger.info('here are the keys in your representations: {}'.format(train_data.keys()))

    # the data_type is the format corresponding to the model of the data that the user wishes to learn
    data_type = config['contexts'] if 'contexts' in config else 'plain'

    test_contexts = create_contexts(test_data, data_type=data_type)
    test_contexts_seq = create_contexts(test_data, data_type='sequential')
    train_contexts = create_contexts(train_data, data_type=data_type)

    logger.info('Vocabulary comparison -- coverage for each dataset: ')
    logger.info(compare_vocabulary([train_data['target'], test_data['target']]))
    # END REPRESENTATION GENERATION

    # FEATURE EXTRACTION
    train_tags = call_for_each_element(train_contexts, tags_from_contexts, data_type=data_type)
    test_tags = call_for_each_element(test_contexts, tags_from_contexts, data_type=data_type)
    test_tags_seq = call_for_each_element(test_contexts_seq, tags_from_contexts, data_type='sequential')

    logger.info('creating feature extractors...')
    feature_extractors = build_objects(config['feature_extractors'])
    logger.info('mapping the feature extractors over the contexts for test...')
    test_features = call_for_each_element(test_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
    logger.info('mapping the feature extractors over the contexts for train...')
    train_features = call_for_each_element(train_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)

    logger.info('number of training instances: {}'.format(len(train_features)))
    logger.info('number of testing instances: {}'.format(len(test_features)))
    logger.info('All of your features now exist in their raw representation, but they may not be numbers yet')
    # END FEATURE EXTRACTION

    # BEGIN CONVERTING FEATURES TO NUMBERS
    logger.info('binarization flag: {}'.format(config['features']['binarize']))
    # flatten so that we can properly binarize the features
    if config['features']['binarize'] is True:
        logger.info('Binarizing your features...')
        all_values = []
        if data_type == 'sequential':
            all_values = flatten(train_features)
        elif data_type == 'plain':
            all_values = train_features
        elif data_type == 'token':
            all_values = flatten(train_features.values())

        feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
        features_num = len(feature_names)
        true_features_num = len(all_values[0])

        logger.info('fitting binarizers...')
        binarizers = fit_binarizers(all_values)
        logger.info('binarizing test data...')
        test_features = call_for_each_element(test_features, binarize, [binarizers], data_type=data_type)
        logger.info('binarizing training data...')
        # TODO: this line hangs with alignment+w2v
        train_features = call_for_each_element(train_features, binarize, [binarizers], data_type=data_type)
        logger.info('All of your features are now scalars in numpy arrays')
    logger.info('training and test sets successfully generated')

    # the way that we persist depends upon the structure of the data (plain/sequence/token_dict)
    # TODO: remove this once we have a list containing all datasets
    if config['features']['persist']:
        if 'persist_format' in config['features']:
            persist_format = config['features']['persist_format']
        else:
            persist_format = 'crf++'
        experiment_datasets = [{'name': 'test', 'features': test_features, 'tags': test_tags},
                               {'name': 'train', 'features': train_features, 'tags': train_tags}]
        feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]

        if config['features']['persist_dir']:
            persist_dir = config['features']['persist_dir']
        else:
            persist_dir = os.getcwd()
        logger.info('persisting your features to: {}'.format(persist_dir))
        # for each dataset, write a file and persist the features
        for dataset_obj in experiment_datasets:
            persist_features(dataset_obj['name'], dataset_obj['features'], persist_dir, feature_names=feature_names, tags=dataset_obj['tags'], file_format=persist_format)

    # BEGIN LEARNING
    # TODO: different sequence learning modules need different representation, we should wrap them in a class
    # TODO: create a consistent interface to sequence learners, will need to use *args and **kwargs because APIs are very different
    from sklearn.metrics import f1_score, precision_score, recall_score
    import numpy as np

    experiment_datasets = [{'name': 'test', 'features': test_features, 'tags': test_tags},
                           {'name': 'train', 'features': train_features, 'tags': train_tags}]
    feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
    print("FEATURE NAMES: ", feature_names)
    persist_dir = tmp_dir
    logger.info('persisting your features to: {}'.format(persist_dir))
    # for each dataset, write a file and persist the features
    if 'persist_format' not in config:
        config['persist_format'] = 'crf_suite'
    for dataset_obj in experiment_datasets:
        persist_features(dataset_obj['name'] + time_stamp, dataset_obj['features'], persist_dir, feature_names=feature_names, tags=dataset_obj['tags'], file_format=config['persist_format'])
    feature_num = len(train_features[0][0])
    train_file = os.path.join(tmp_dir, 'train' + time_stamp + '.crf')
    test_file = os.path.join(tmp_dir, 'test' + time_stamp + '.crf')

    tag_map = {u'OK': 1, u'BAD': 0, 0: 0, 1: 1}
    if config['persist_format'] == 'crf++':
        # generate a template for the CRF++ feature extractor
        generate_crf_template(feature_num, 'template', tmp_dir)
        # train a CRF++ model
        call(['crf_learn', '-a', 'MIRA', os.path.join(tmp_dir, 'template'), train_file, os.path.join(tmp_dir, 'crfpp_model_file' + time_stamp)])
        # tag a test set
        call(['crf_test', '-m', os.path.join(tmp_dir, 'crfpp_model_file' + time_stamp), '-o', test_file + '.tagged', test_file])
    elif config['persist_format'] == 'crf_suite':
        crfsuite_algorithm = config['crfsuite_algorithm']
        call(['crfsuite', 'learn', '-a', crfsuite_algorithm, '-m', os.path.join(tmp_dir, 'crfsuite_model_file' + time_stamp), train_file])
        test_out = open(test_file + '.tagged', 'w')
        call(['crfsuite', 'tag', '-tr', '-m', os.path.join(tmp_dir, 'crfsuite_model_file' + time_stamp), test_file], stdout=test_out)
        test_out.close()
    else:
        print("Unknown persist format: {}".format(config['persist_format']))

    # parse CRFSuite output
    flattened_ref, flattened_hyp = [], []
    tag_map = {'OK': 1, 'BAD': 0}
    for line in open(test_file + '.tagged'):
        if line == "\n":
            continue
        chunks = line.strip('\n').split('\t')
        if len(chunks) != 2:
            continue
        try:
            flattened_ref.append(tag_map[chunks[-2]])
            flattened_hyp.append(tag_map[chunks[-1]])
        except KeyError:
            continue
    print("Ref, hyp: ", len(flattened_ref), len(flattened_hyp))
    logger.info('Structured prediction f1: ')
    print(f1_score(flattened_ref, flattened_hyp, average=None))
    print(f1_score(flattened_ref, flattened_hyp, average='weighted', pos_label=None))
    logger.info("Sequence correlation: ")
    # print(sequence_correlation_weighted(y_test, structured_hyp, verbose=True)[1])


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).")
    parser.add_argument("-a", help="crfsuite algorithm")
    args = parser.parse_args()
    experiment_config = {}

    # Experiment hyperparams
    cfg_path = args.configuration_file
    # read configuration file
    with open(cfg_path, "r") as cfg_file:
        experiment_config = yaml.load(cfg_file.read())
    experiment_config['crfsuite_algorithm'] = args.a
    main(experiment_config)
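The evaluation loop at the end of main() is easy to lift out and test on its own. The sketch below is an editorial rewrite, not part of the record; it keeps the record's column convention (second-to-last tab-separated field read as the reference tag, last field as the prediction) and feeds it a tiny synthetic tagged file.

from sklearn.metrics import f1_score

TAG_MAP = {'OK': 1, 'BAD': 0}

def f1_from_tagged(path):
    ref, hyp = [], []
    for line in open(path):
        chunks = line.strip('\n').split('\t')
        if len(chunks) != 2:
            continue  # skip blank sequence separators and malformed rows
        try:
            ref.append(TAG_MAP[chunks[-2]])
            hyp.append(TAG_MAP[chunks[-1]])
        except KeyError:
            continue
    return f1_score(ref, hyp, average=None)

# tiny synthetic demo: two sequences, one wrong token
with open('demo.tagged', 'w') as f:
    f.write('OK\tOK\nBAD\tOK\n\nOK\tOK\nBAD\tBAD\n')
print(f1_from_tagged('demo.tagged'))  # per-class F1: [0.667 0.8] for BAD, OK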
null
marmot/experiment/crf_experiment.py
crf_experiment.py
py
12,883
python
en
code
null
code-starcoder2
83
[ { "api_name": "logging.basicConfig", "line_number": 21, "usage_type": "call" }, { "api_name": "logging.INFO", "line_number": 21, "usage_type": "attribute" }, { "api_name": "logging.getLogger", "line_number": 22, "usage_type": "call" }, { "api_name": "marmot.experiment.import_utils.mk_tmp_dir", "line_number": 53, "usage_type": "call" }, { "api_name": "time.time", "line_number": 54, "usage_type": "call" }, { "api_name": "marmot.experiment.import_utils.build_objects", "line_number": 59, "usage_type": "call" }, { "api_name": "marmot.experiment.import_utils.build_object", "line_number": 68, "usage_type": "call" }, { "api_name": "marmot.experiment.import_utils.build_objects", "line_number": 79, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.create_contexts", "line_number": 117, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.create_contexts", "line_number": 118, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.create_contexts", "line_number": 119, "usage_type": "call" }, { "api_name": "marmot.evaluation.evaluation_utils.compare_vocabulary", "line_number": 122, "usage_type": "call" }, { "api_name": "marmot.experiment.import_utils.call_for_each_element", "line_number": 127, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.tags_from_contexts", "line_number": 127, "usage_type": "argument" }, { "api_name": "marmot.experiment.import_utils.call_for_each_element", "line_number": 128, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.tags_from_contexts", "line_number": 128, "usage_type": "argument" }, { "api_name": "marmot.experiment.import_utils.call_for_each_element", "line_number": 129, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.tags_from_contexts", "line_number": 129, "usage_type": "argument" }, { "api_name": "marmot.experiment.import_utils.build_objects", "line_number": 132, "usage_type": "call" }, { "api_name": "marmot.experiment.import_utils.call_for_each_element", "line_number": 134, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.contexts_to_features", "line_number": 134, "usage_type": "argument" }, { "api_name": "marmot.experiment.import_utils.call_for_each_element", "line_number": 136, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.contexts_to_features", "line_number": 136, "usage_type": "argument" }, { "api_name": "marmot.experiment.preprocessing_utils.flatten", "line_number": 151, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.flatten", "line_number": 155, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.fit_binarizers", "line_number": 162, "usage_type": "call" }, { "api_name": "marmot.experiment.import_utils.call_for_each_element", "line_number": 164, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.binarize", "line_number": 164, "usage_type": "argument" }, { "api_name": "marmot.experiment.import_utils.call_for_each_element", "line_number": 167, "usage_type": "call" }, { "api_name": "marmot.experiment.preprocessing_utils.binarize", "line_number": 167, "usage_type": "argument" }, { "api_name": "os.path.getcwd", "line_number": 185, "usage_type": "call" }, { "api_name": "os.path", "line_number": 185, "usage_type": "attribute" }, { "api_name": "marmot.util.persist_features.persist_features", "line_number": 189, "usage_type": "call" }, { "api_name": 
"marmot.util.persist_features.persist_features", "line_number": 208, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 211, "usage_type": "call" }, { "api_name": "os.path", "line_number": 211, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 212, "usage_type": "call" }, { "api_name": "os.path", "line_number": 212, "usage_type": "attribute" }, { "api_name": "marmot.util.generate_crf_template.generate_crf_template", "line_number": 217, "usage_type": "call" }, { "api_name": "subprocess.call", "line_number": 219, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 219, "usage_type": "call" }, { "api_name": "os.path", "line_number": 219, "usage_type": "attribute" }, { "api_name": "subprocess.call", "line_number": 221, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 221, "usage_type": "call" }, { "api_name": "os.path", "line_number": 221, "usage_type": "attribute" }, { "api_name": "subprocess.call", "line_number": 224, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 224, "usage_type": "call" }, { "api_name": "os.path", "line_number": 224, "usage_type": "attribute" }, { "api_name": "subprocess.call", "line_number": 226, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 226, "usage_type": "call" }, { "api_name": "os.path", "line_number": 226, "usage_type": "attribute" }, { "api_name": "sklearn.metrics.f1_score", "line_number": 248, "usage_type": "call" }, { "api_name": "sklearn.metrics.f1_score", "line_number": 249, "usage_type": "call" }, { "api_name": "argparse.ArgumentParser", "line_number": 256, "usage_type": "call" }, { "api_name": "yaml.load", "line_number": 266, "usage_type": "call" } ]
142165523
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 16:06:16 2020

@author: js
"""

import torch
import torch.nn as nn

from utils_down_resol import DownResolFunction, DownResolModule, DynamicDownResolModule, calcScale

num_bits = 8


class BasicBlock(nn.Module):
    def __init__(self, in_planes, planes, blockstride=1):
        super(BasicBlock, self).__init__()
        self.blockstride = blockstride
        self.channel_change = in_planes != planes

        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=blockstride, padding=1, bias=True)
        self.down_resol1 = DynamicDownResolModule(num_bits)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=True)
        self.down_resol2 = DynamicDownResolModule(num_bits)

        if blockstride > 1 or in_planes != planes:
            self.conv3 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=blockstride, padding=0, bias=True)
            self.down_resol3 = DynamicDownResolModule(num_bits)

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.down_resol1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.down_resol2(out)

        if self.blockstride > 1 or self.channel_change:
            residual = self.conv3(residual)
            residual = self.down_resol3(residual)

        out += residual
        out = self.relu(out)
        return out


class ResBlock(nn.Module):
    def __init__(self, in_planes, inter_planes, planes, blockstride=1):
        super(ResBlock, self).__init__()
        self.blockstride = blockstride
        self.channel_change = in_planes != planes

        self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1, padding=0, bias=True)
        self.down_resol1 = DynamicDownResolModule(num_bits)
        self.conv2 = nn.Conv2d(inter_planes, inter_planes, kernel_size=3, stride=blockstride, padding=1, bias=True)
        self.down_resol2 = DynamicDownResolModule(num_bits)
        self.conv3 = nn.Conv2d(inter_planes, planes, kernel_size=1, stride=1, padding=0, bias=True)
        self.down_resol3 = DynamicDownResolModule(num_bits)

        if blockstride > 1 or in_planes != planes:
            self.conv4 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=blockstride, padding=0, bias=True)
            self.down_resol4 = DynamicDownResolModule(num_bits)

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.down_resol1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.down_resol2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.down_resol3(out)

        if self.blockstride > 1 or self.channel_change:
            residual = self.conv4(residual)
            residual = self.down_resol4(residual)

        out += residual
        out = self.relu(out)
        return out


class Resnet34(nn.Module):
    def __init__(self, unet=False):
        super(Resnet34, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.unet = unet

        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=True)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)

        # Block1
        self.block1_1 = BasicBlock(64, 64)
        self.block1_2 = BasicBlock(64, 64)
        self.block1_3 = BasicBlock(64, 64)

        # Block2
        self.block2_1 = BasicBlock(64, 128, blockstride=2)
        self.block2_2 = BasicBlock(128, 128)
        self.block2_3 = BasicBlock(128, 128)
        self.block2_4 = BasicBlock(128, 128)

        # Block3
        self.block3_1 = BasicBlock(128, 256, blockstride=2)
        self.block3_2 = BasicBlock(256, 256)
        self.block3_3 = BasicBlock(256, 256)
        self.block3_4 = BasicBlock(256, 256)
        self.block3_5 = BasicBlock(256, 256)
        self.block3_6 = BasicBlock(256, 256)

        # Block4
        self.block4_1 = BasicBlock(256, 512, blockstride=2)
        self.block4_2 = BasicBlock(512, 512)
        self.block4_3 = BasicBlock(512, 512)

        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.fc = nn.Linear(512, 1000, bias=True)

    def forward(self, x):
        x = self.conv1(x)
        c1 = self.relu(x)
        x = self.maxpool(c1)

        x = self.block1_1(x)
        x = self.block1_2(x)
        c2 = self.block1_3(x)

        x = self.block2_1(c2)
        x = self.block2_2(x)
        x = self.block2_3(x)
        c3 = self.block2_4(x)

        x = self.block3_1(c3)
        x = self.block3_2(x)
        x = self.block3_3(x)
        x = self.block3_4(x)
        x = self.block3_5(x)
        c4 = self.block3_6(x)

        x = self.block4_1(c4)
        x = self.block4_2(x)
        c5 = self.block4_3(x)

        x = self.avgpool(c5)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        if self.unet:
            return c1, c2, c3, c4, c5
        else:
            return x


class Resnet50(nn.Module):
    def __init__(self, unet=False):
        super(Resnet50, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.unet = unet

        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=True)
        self.down_resol1 = DynamicDownResolModule(num_bits)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Block1
        self.block1_1 = ResBlock(64, 64, 256)
        self.block1_2 = ResBlock(256, 64, 256)
        self.block1_3 = ResBlock(256, 64, 256)

        # Block2
        self.block2_1 = ResBlock(256, 128, 512, blockstride=2)
        self.block2_2 = ResBlock(512, 128, 512)
        self.block2_3 = ResBlock(512, 128, 512)
        self.block2_4 = ResBlock(512, 128, 512)

        # Block3
        self.block3_1 = ResBlock(512, 256, 1024, blockstride=2)
        self.block3_2 = ResBlock(1024, 256, 1024)
        self.block3_3 = ResBlock(1024, 256, 1024)
        self.block3_4 = ResBlock(1024, 256, 1024)
        self.block3_5 = ResBlock(1024, 256, 1024)
        self.block3_6 = ResBlock(1024, 256, 1024)

        # Block4
        self.block4_1 = ResBlock(1024, 512, 2048, blockstride=2)
        self.block4_2 = ResBlock(2048, 512, 2048)
        self.block4_3 = ResBlock(2048, 512, 2048)

        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.fc = nn.Linear(2048, 1000, bias=True)
        self.down_resol2 = DynamicDownResolModule(num_bits)

    def forward(self, x):
        x = self.conv1(x)
        x = self.down_resol1(x)
        c1 = self.relu(x)
        x = self.maxpool(c1)

        x = self.block1_1(x)
        x = self.block1_2(x)
        c2 = self.block1_3(x)

        x = self.block2_1(c2)
        x = self.block2_2(x)
        x = self.block2_3(x)
        c3 = self.block2_4(x)

        x = self.block3_1(c3)
        x = self.block3_2(x)
        x = self.block3_3(x)
        x = self.block3_4(x)
        x = self.block3_5(x)
        c4 = self.block3_6(x)

        x = self.block4_1(c4)
        x = self.block4_2(x)
        c5 = self.block4_3(x)

        x = self.avgpool(c5)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        x = self.down_resol2(x)

        if self.unet:
            return c1, c2, c3, c4, c5
        else:
            return x


if __name__ == "__main__":
    net = Resnet34()
    x = torch.rand((2, 3, 224, 224))
    output = net(x)
    print(output.shape)

    net = Resnet50()
    x = torch.rand((2, 3, 224, 224))
    output = net(x)
    print(output.shape)
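utils_down_resol is not included in this record, so DynamicDownResolModule's exact behavior is unknown. The sketch below is an editorial stand-in showing the generic dynamic fake-quantization such a name usually suggests: derive a scale from the tensor's current range, round to the levels that num_bits allows, clamp, and dequantize so the quantization error is baked into the activations.

import torch
import torch.nn as nn

class FakeDynamicQuant(nn.Module):
    """Hypothetical stand-in for DynamicDownResolModule; not the record's implementation."""
    def __init__(self, num_bits=8):
        super().__init__()
        self.qmax = 2 ** (num_bits - 1) - 1  # symmetric signed range, e.g. 127 for 8 bits

    def forward(self, x):
        # per-tensor scale chosen dynamically from the current activation range
        scale = x.detach().abs().max().clamp(min=1e-8) / self.qmax
        q = torch.clamp(torch.round(x / scale), -self.qmax - 1, self.qmax)
        return q * scale  # dequantized values with the rounding error applied

t = torch.randn(4, 4)
print((FakeDynamicQuant(8)(t) - t).abs().max())  # small rounding error, about scale / 2

One caveat: torch.round has a zero gradient almost everywhere, so this naive version blocks backpropagation through the quantizer. The record's separate DownResolFunction import hints that the real module routes this through a custom autograd Function (a straight-through estimator); that detail is omitted here.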
null
srcs/model_quant.py
model_quant.py
py
7,804
python
en
code
null
code-starcoder2
83
[ { "api_name": "torch.nn.Module", "line_number": 17, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 17, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 24, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 24, "usage_type": "name" }, { "api_name": "utils_down_resol.DynamicDownResolModule", "line_number": 25, "usage_type": "call" }, { "api_name": "torch.nn.Conv2d", "line_number": 26, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 26, "usage_type": "name" }, { "api_name": "utils_down_resol.DynamicDownResolModule", "line_number": 27, "usage_type": "call" }, { "api_name": "torch.nn.Conv2d", "line_number": 30, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 30, "usage_type": "name" }, { "api_name": "utils_down_resol.DynamicDownResolModule", "line_number": 31, "usage_type": "call" }, { "api_name": "torch.nn.ReLU", "line_number": 33, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 33, "usage_type": "name" }, { "api_name": "torch.nn.Module", "line_number": 54, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 54, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 62, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 62, "usage_type": "name" }, { "api_name": "utils_down_resol.DynamicDownResolModule", "line_number": 63, "usage_type": "call" }, { "api_name": "torch.nn.Conv2d", "line_number": 64, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 64, "usage_type": "name" }, { "api_name": "utils_down_resol.DynamicDownResolModule", "line_number": 65, "usage_type": "call" }, { "api_name": "torch.nn.Conv2d", "line_number": 66, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 66, "usage_type": "name" }, { "api_name": "utils_down_resol.DynamicDownResolModule", "line_number": 67, "usage_type": "call" }, { "api_name": "torch.nn.Conv2d", "line_number": 70, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 70, "usage_type": "name" }, { "api_name": "utils_down_resol.DynamicDownResolModule", "line_number": 71, "usage_type": "call" }, { "api_name": "torch.nn.ReLU", "line_number": 73, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 73, "usage_type": "name" }, { "api_name": "torch.nn.Module", "line_number": 97, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 97, "usage_type": "name" }, { "api_name": "torch.nn.ReLU", "line_number": 102, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 102, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 106, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 106, "usage_type": "name" }, { "api_name": "torch.nn.MaxPool2d", "line_number": 107, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 107, "usage_type": "name" }, { "api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 129, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 129, "usage_type": "name" }, { "api_name": "torch.nn.Linear", "line_number": 130, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 130, "usage_type": "name" }, { "api_name": "torch.flatten", "line_number": 158, "usage_type": "call" }, { "api_name": "torch.nn.Module", "line_number": 167, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 167, "usage_type": "name" }, { "api_name": "torch.nn.ReLU", "line_number": 172, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 172, 
"usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 175, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 175, "usage_type": "name" }, { "api_name": "utils_down_resol.DynamicDownResolModule", "line_number": 176, "usage_type": "call" }, { "api_name": "torch.nn.MaxPool2d", "line_number": 177, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 177, "usage_type": "name" }, { "api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 199, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 199, "usage_type": "name" }, { "api_name": "torch.nn.Linear", "line_number": 200, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 200, "usage_type": "name" }, { "api_name": "utils_down_resol.DynamicDownResolModule", "line_number": 201, "usage_type": "call" }, { "api_name": "torch.flatten", "line_number": 230, "usage_type": "call" }, { "api_name": "torch.rand", "line_number": 242, "usage_type": "call" }, { "api_name": "torch.rand", "line_number": 247, "usage_type": "call" } ]
139691351
# -*- coding: utf-8 -*-
#!/usr/bin/python3

import sys
import argparse


class NewickTree(object):
    """Newick Tree."""

    def __init__(self):
        self.root = None
        self.node_dict = {}

    def add_node(self, parent_name, node_name):
        # First input as root
        if not self.root:
            if node_name == parent_name:
                print("Cannot have duplicated parent node", file=sys.stderr)
                return
            newnode = NewickNode(parent_name, None)
            self.root = newnode
            self.node_dict[parent_name] = newnode
            newchild = newnode.add_child(node_name)
            self.node_dict[node_name] = newchild
            return
        if parent_name not in self.node_dict and self.root:
            print("Node_Parent %s not in the tree" % parent_name, file=sys.stderr)
            return
        if node_name in self.node_dict:
            print("Node %s already in the tree" % node_name, file=sys.stderr)
            return
        parent_node = self.node_dict[parent_name]
        childnode = parent_node.add_child(node_name)
        self.node_dict[node_name] = childnode

    def add_raw_node(self, parent_name, node_name):
        if node_name == parent_name:
            print("Cannot have duplicated parent node", file=sys.stderr)
            return
        if parent_name not in self.node_dict:
            newnode = NewickNode(parent_name, None)
            self.node_dict[parent_name] = newnode
        if node_name not in self.node_dict:
            newnode = NewickNode(node_name, None)
            self.node_dict[node_name] = newnode
        if self.node_dict[node_name].parent:
            print("Node %s cannot be assigned to both %s and %s" % (
                node_name, parent_name, self.node_dict[node_name].parent.name),
                file=sys.stderr)
            return
        self.node_dict[parent_name].child_list.append(self.node_dict[node_name])
        self.node_dict[node_name].parent = self.node_dict[parent_name]

    def print_tree(self):
        """Print tree"""
        cur_node = self.root
        if not cur_node:
            print("Empty Tree", file=sys.stderr)
            return
        children_content = self.print_children(cur_node)
        print(children_content)

    def load_tree(self, raw_tree: str):
        """Load the Newick tree from the parentheses form."""
        # Remove spaces/tabs
        raw_tree = "".join(raw_tree.split())
        # Use depth dict to store the raw tree
        cur_node_ch = []
        orphans = {}
        cur_depth = 0
        for ch in raw_tree:
            if ch == "(":
                cur_depth += 1
            elif ch == ",":
                node = "".join(cur_node_ch)
                cur_node_ch = []
                orphans.setdefault(cur_depth, [])
                orphans[cur_depth].append(node)
                # Add the child of current node if exists
                if cur_depth + 1 in orphans:
                    for child in orphans[cur_depth + 1]:
                        self.add_raw_node(node, child)
                    del orphans[cur_depth + 1]
            elif ch == ")":
                node = "".join(cur_node_ch)
                cur_node_ch = []
                orphans.setdefault(cur_depth, [])
                orphans[cur_depth].append(node)
                # Add the child of current node if exists
                if cur_depth + 1 in orphans:
                    for child in orphans[cur_depth + 1]:
                        self.add_raw_node(node, child)
                    del orphans[cur_depth + 1]
                cur_depth -= 1
            else:
                cur_node_ch.append(ch)
        # Add the child of the root node
        root = "".join(cur_node_ch)
        if 1 in orphans:
            for child in orphans[1]:
                self.add_raw_node(root, child)
        self.root = self.node_dict[root]

    def get_distance(self, nodename_a, nodename_b):
        """Get the distance between two nodes."""
        try:
            assert nodename_a in self.node_dict
        except AssertionError:
            print("%s is not in the tree" % nodename_a, file=sys.stderr)
        try:
            assert nodename_b in self.node_dict
        except AssertionError:
            print("%s is not in the tree" % nodename_b, file=sys.stderr)
        node_a = self.node_dict[nodename_a]
        node_b = self.node_dict[nodename_b]
        trans_a = [nodename_a]
        trans_b = [nodename_b]
        cur_node = node_a
        while cur_node.parent:
            cur_node = cur_node.parent
            trans_a.append(cur_node.name)
        cur_node = node_b
        while cur_node.parent:
            cur_node = cur_node.parent
            trans_b.append(cur_node.name)
        for depth_a, node_name in enumerate(trans_a):
            if node_name in trans_b:
                depth_b = trans_b.index(node_name)
                break
        return depth_a + depth_b

    def print_children(self, cur_node):
        cur_children = cur_node.child_list
        if not cur_children:
            return cur_node.name
        children_content = []
        for child in cur_children:
            children_content.append(self.print_children(child))
        children_content = ",".join(children_content)
        children_content = "(%s)%s" % (children_content, cur_node.name)
        return children_content


class NewickNode(object):
    """Node of Newick Tree"""

    def __init__(self, name, parent):
        self.name = name
        self.child_list = []
        self.parent = parent

    def add_child(self, name):
        new_child = NewickNode(name, self)
        self.child_list.append(new_child)
        return new_child


def build_print_newicktree(infile):
    """Test to build and print newick tree."""
    n_tree = NewickTree()
    for line in infile:
        entry = line.split()
        if not entry:
            continue
        n_tree.add_node(entry[0], entry[1])
    n_tree.print_tree()


def dist_newicktree(infile):
    """Load Newick Tree in parentheses format."""
    count = 0
    for line in infile:
        count = count + 1
        entry = line.strip().split(";")
        if not entry[0]:  # newline
            continue
        elif len(entry) != 2:
            print("Input must be of format <newick_tree>; <int>,<int>. Line %i skipped." % count,
                  file=sys.stderr)
            continue
        raw_tree = entry[0]
        nodes = entry[1].strip().split(",")
        if len(nodes) != 2 or nodes[0] == '' or nodes[1] == '':
            print("Requires two nodes to calculate distance. Line %i skipped."
                  % count, file=sys.stderr)
            continue
        node_a = nodes[0].strip()
        node_b = nodes[1].strip()
        n_tree = NewickTree()
        n_tree.load_tree(raw_tree)
        print(n_tree.get_distance(node_a, node_b))


def main(infile):
    """Main func for Rosalind."""
    dist_newicktree(infile)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Input must be of format \
                                     <newick_tree>; <int>,<int>. For example, \
                                     ((((6)5)2,(4,7)3)1)0; 3,6', prog='problem2.B.py')
    parser.add_argument('--infile', nargs='?', type=argparse.FileType('r'),
                        default=sys.stdin, help='Either enter a filename or \
                        enter input via stdin (default)')
    args = parser.parse_args()
    main(args.infile)
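
# Worked example (matches the argparse help text above):
#     echo "((((6)5)2,(4,7)3)1)0; 3,6" | python problem2.B.py
# load_tree() builds 0 -> 1 -> {2 -> 5 -> 6, 3 -> {4, 7}}, and
# get_distance('3', '6') prints 4: node 3 climbs one edge to the common
# ancestor 1 (depth_a = 1), node 6 climbs three (6 -> 5 -> 2 -> 1, depth_b = 3),
# so depth_a + depth_b = 4.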
null
team_D1_zxu_ehuang/problem2.B.py
problem2.B.py
py
7,492
python
en
code
null
code-starcoder2
83
[ { "api_name": "sys.stderr", "line_number": 16, "usage_type": "attribute" }, { "api_name": "sys.stderr", "line_number": 25, "usage_type": "attribute" }, { "api_name": "sys.stderr", "line_number": 28, "usage_type": "attribute" }, { "api_name": "sys.stderr", "line_number": 36, "usage_type": "attribute" }, { "api_name": "sys.stderr", "line_number": 47, "usage_type": "attribute" }, { "api_name": "sys.stderr", "line_number": 56, "usage_type": "attribute" }, { "api_name": "sys.stderr", "line_number": 106, "usage_type": "attribute" }, { "api_name": "sys.stderr", "line_number": 110, "usage_type": "attribute" }, { "api_name": "sys.stderr", "line_number": 177, "usage_type": "attribute" }, { "api_name": "sys.stderr", "line_number": 183, "usage_type": "attribute" }, { "api_name": "argparse.ArgumentParser", "line_number": 199, "usage_type": "call" }, { "api_name": "argparse.FileType", "line_number": 203, "usage_type": "call" }, { "api_name": "sys.stdin", "line_number": 204, "usage_type": "attribute" } ]
573264646
import pytest

from pyscipopt import Model, quicksum
from pyscipopt.scip import Expr, ExprCons


def test_string():
    PI = 3.141592653589793238462643
    NWIRES = 11
    DIAMETERS = [0.207, 0.225, 0.244, 0.263, 0.283, 0.307, 0.331, 0.362, 0.394, 0.4375, 0.500]
    PRELOAD = 300.0
    MAXWORKLOAD = 1000.0
    MAXDEFLECT = 6.0
    DEFLECTPRELOAD = 1.25
    MAXFREELEN = 14.0
    MAXCOILDIAM = 3.0
    MAXSHEARSTRESS = 189000.0
    SHEARMOD = 11500000.0

    m = Model()
    coil = m.addVar('coildiam')
    wire = m.addVar('wirediam')
    defl = m.addVar('deflection', lb=DEFLECTPRELOAD / (MAXWORKLOAD - PRELOAD), ub=MAXDEFLECT / PRELOAD)
    ncoils = m.addVar('ncoils', vtype='I')
    const1 = m.addVar('const1')
    const2 = m.addVar('const2')
    volume = m.addVar('volume')
    y = [m.addVar('wire%d' % i, vtype='B') for i in range(NWIRES)]

    obj = 1.0 * volume
    m.setObjective(obj, 'minimize')

    m.addCons(PI/2 * (ncoils + 2) * coil * wire**2 - volume == 0, name='voldef')

    # defconst1: coil / wire - const1 == 0.0
    m.addCons(coil - const1*wire == 0, name='defconst1')

    # defconst2: (4.0*const1 - 1.0) / (4.0*const1 - 4.0) + 0.615 / const1 - const2 == 0.0
    d1 = (4.0*const1 - 4.0)
    d2 = const1
    m.addCons((4.0*const1 - 1.0)*d2 + 0.615*d1 - const2*d1*d2 == 0, name='defconst2')

    m.addCons(8.0*MAXWORKLOAD/PI * const1 * const2 - MAXSHEARSTRESS * wire**2 <= 0.0, name='shear')

    # defdefl: 8.0/shearmod * ncoils * const1^3 / wire - defl == 0.0
    m.addCons(8.0/SHEARMOD * ncoils * const1**3 - defl*wire == 0.0, name="defdefl")

    m.addCons(MAXWORKLOAD*defl + 1.05*ncoils*wire + 2.1*wire <= MAXFREELEN, name='freel')

    m.addCons(coil + wire <= MAXCOILDIAM, name='coilwidth')

    m.addCons(quicksum(c*v for (c, v) in zip(DIAMETERS, y)) - wire == 0, name='defwire')

    m.addCons(quicksum(y) == 1, name='selectwire')

    m.optimize()


if __name__ == '__main__':
    test_string()
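
# Possible extension (a sketch, not part of the original test): after
# m.optimize() one could inspect the solution SCIP found with the standard
# accessors, e.g. inside test_string():
#     if m.getStatus() == "optimal":
#         print("volume =", m.getVal(volume))
#         print("ncoils =", m.getVal(ncoils))
#         print("wire diameter =", m.getVal(wire))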
null
tests/test_nonlinear.py
test_nonlinear.py
py
1,907
python
en
code
null
code-starcoder2
83
[ { "api_name": "pyscipopt.Model", "line_number": 19, "usage_type": "call" }, { "api_name": "pyscipopt.quicksum", "line_number": 51, "usage_type": "call" }, { "api_name": "pyscipopt.quicksum", "line_number": 53, "usage_type": "call" } ]
69503971
from django.conf import settings
from django.test import Client
from django.test import LiveServerTestCase
from selenium import webdriver

from . import utils
from .pop import goalsPage
from .pop import indexPage
from .pop import rolesPage
from .pop import todosPage
from .pop import navbar


class InterfaceTests(LiveServerTestCase):

    def setUp(self):
        super(InterfaceTests, self).setUp()
        self.__initSelenium()
        self.goalsPage = goalsPage.goalsPage(selenium=self.selenium)
        self.indexPage = indexPage.indexPage(selenium=self.selenium)
        self.rolesPage = rolesPage.rolesPage(selenium=self.selenium)
        self.todosPage = todosPage.todosPage(selenium=self.selenium)
        self.navbar = navbar.navbar(selenium=self.selenium)
        self.testUtil = utils.TestUtil(selenium=self.selenium)

    def __initSelenium(self):
        self.selenium = webdriver.Chrome(settings.SELENIUM_CHROMEDRIVER)
        self.selenium.get(settings.MAIN_PAGE)

    def tearDown(self):
        self.selenium.quit()
        super(InterfaceTests, self).tearDown()

    def test_navbar_has_roles_button(self):
        self.navbar.clickRolesLink()
        self.testUtil.assertCurrentUrl(expectedUrl=self.rolesPage.getExpectedUrl())

    def test_navbar_has_goals_button(self):
        self.navbar.clickGoalsLink()
        self.testUtil.assertCurrentUrl(expectedUrl=self.goalsPage.getExpectedUrl())

    def test_navbar_has_todos_button(self):
        self.navbar.clickTodosLink()
        self.testUtil.assertCurrentUrl(expectedUrl=self.todosPage.getExpectedUrl())
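
# The pop.* page objects are not shipped in this record; a hypothetical minimal
# navbar consistent with the calls above might look like this (the locator is
# invented for illustration, the real one lives in pop/navbar.py):
#     from selenium.webdriver.common.by import By
#
#     class navbar:
#         def __init__(self, selenium):
#             self.selenium = selenium
#
#         def clickRolesLink(self):
#             self.selenium.find_element(By.ID, "nav-roles").click()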
null
weekPlaner/calendarManager/tests/tests_interface.py
tests_interface.py
py
1,597
python
en
code
null
code-starcoder2
83
[ { "api_name": "django.test.LiveServerTestCase", "line_number": 12, "usage_type": "name" }, { "api_name": "pop.goalsPage.goalsPage", "line_number": 17, "usage_type": "call" }, { "api_name": "pop.goalsPage", "line_number": 17, "usage_type": "name" }, { "api_name": "pop.indexPage.indexPage", "line_number": 18, "usage_type": "call" }, { "api_name": "pop.indexPage", "line_number": 18, "usage_type": "name" }, { "api_name": "pop.rolesPage.rolesPage", "line_number": 19, "usage_type": "call" }, { "api_name": "pop.rolesPage", "line_number": 19, "usage_type": "name" }, { "api_name": "pop.todosPage.todosPage", "line_number": 20, "usage_type": "call" }, { "api_name": "pop.todosPage", "line_number": 20, "usage_type": "name" }, { "api_name": "pop.navbar.navbar", "line_number": 21, "usage_type": "call" }, { "api_name": "pop.navbar", "line_number": 21, "usage_type": "name" }, { "api_name": "selenium.webdriver.Chrome", "line_number": 25, "usage_type": "call" }, { "api_name": "selenium.webdriver", "line_number": 25, "usage_type": "name" }, { "api_name": "django.conf.settings.SELENIUM_CHROMEDRIVER", "line_number": 25, "usage_type": "attribute" }, { "api_name": "django.conf.settings", "line_number": 25, "usage_type": "name" }, { "api_name": "django.conf.settings.MAIN_PAGE", "line_number": 26, "usage_type": "attribute" }, { "api_name": "django.conf.settings", "line_number": 26, "usage_type": "name" } ]
40642366
import time
import tweepy as twitter
import os

superhour = time.localtime().tm_hour
hour = superhour % 12
if hour == 0:
    hour = 12

# Template, roughly: "I have N tear(s) on the edge of the dial, <one tear less /
# 12 tears> in the Azores" (the Azores run one hour behind mainland Portugal).
sentence = "Tenho %d lágrima%s no canto do mostrador, %s nos Açores%s"

if superhour >= 12:
    if hour == 1:
        sentence = sentence % (hour, "", "12 lágrimas", "")
    else:
        sentence = sentence % (hour, "s", "menos uma lágrima", "")
else:
    if hour == 1:
        sentence = sentence % (hour, "", "12 lágrimas", ".")
    else:
        sentence = sentence % (hour, "s", "menos uma lágrima", ".")

CONSUMER_KEY = os.getenv('CONSUMER_KEY')
CONSUMER_SECRET = os.getenv('CONSUMER_SECRET')
ACCESS_TOKEN = os.getenv('ACCESS_TOKEN')
ACCESS_TOKEN_SECRET = os.getenv('ACCESS_TOKEN_SECRET')

auth = twitter.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)

api = twitter.API(auth)
api.update_status(status=sentence)
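
# Worked example: at 15:04 local time, superhour = 15 and hour = 3, so the bot
# tweets "Tenho 3 lágrimas no canto do mostrador, menos uma lágrima nos Açores";
# morning runs append a trailing period, so the AM and PM texts never collide.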
null
bonga.py
bonga.py
py
876
python
en
code
null
code-starcoder2
83
[ { "api_name": "time.localtime", "line_number": 5, "usage_type": "call" }, { "api_name": "os.getenv", "line_number": 23, "usage_type": "call" }, { "api_name": "os.getenv", "line_number": 24, "usage_type": "call" }, { "api_name": "os.getenv", "line_number": 25, "usage_type": "call" }, { "api_name": "os.getenv", "line_number": 26, "usage_type": "call" }, { "api_name": "tweepy.OAuthHandler", "line_number": 28, "usage_type": "call" }, { "api_name": "tweepy.API", "line_number": 31, "usage_type": "call" } ]
194858390
__author__ = 'daniel'

import matplotlib.pyplot as plt
from scipy import ndimage
import numpy as np
import time
import threading
import random

global my_data

path = "/home/daniel/bt-img/test1/"
img = ["ball-1.jpg", "ball-2.jpg", "ball-3.jpg", "ball-4.jpg", "ball-5.jpg"]

my_data = ndimage.imread(path + random.choice(img))


class ComputeThread(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)
        self.nr = 1
        self.i = 0

    def run(self):
        global my_data
        while self.i < 10:
            self.i += 1
            read_file = random.choice(img)
            print("Compute iteration: " + str(self.i) + " | reading file: " + read_file)
            my_data = ndimage.imread(path + read_file)
            print("type: {0} | shape: {1}".format(type(my_data), my_data.shape))
            time.sleep(1.0)


class Graphic():

    def __init__(self):
        self.i = 0
        self.run()

    def run(self):
        global my_data
        f, axarr = plt.subplots(2, 2)
        self.do_plot(axarr)
        plt.pause(0.01)
        while self.i < 10:
            self.i += 1
            print("Graphic iteration: " + str(self.i))
            self.do_plot(axarr)
            plt.draw()
            plt.pause(0.01)
            time.sleep(1.0)
        plt.close()

    def do_plot(self, axarr):
        axarr[0, 0].imshow(my_data, interpolation="none")
        axarr[0, 1].imshow(my_data[:, :, 0], interpolation="none")
        axarr[1, 0].imshow(my_data[:, :, 1], interpolation="none")
        axarr[1, 1].imshow(my_data[:, :, 2], interpolation="none")


CT = ComputeThread()
CT.start()

G = Graphic()

print("done!")
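
# Compatibility note: scipy.ndimage.imread was removed in SciPy 1.2. On newer
# stacks the equivalent read can be done with imageio (a drop-in sketch,
# assuming imageio is installed):
#     import imageio.v2 as imageio
#     my_data = imageio.imread(path + random.choice(img))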
null
code/img_handler/matplotlib_show_subplots.py
matplotlib_show_subplots.py
py
1,678
python
en
code
null
code-starcoder2
83
[ { "api_name": "scipy.ndimage.imread", "line_number": 16, "usage_type": "call" }, { "api_name": "scipy.ndimage", "line_number": 16, "usage_type": "name" }, { "api_name": "random.choice", "line_number": 16, "usage_type": "call" }, { "api_name": "threading.Thread", "line_number": 19, "usage_type": "attribute" }, { "api_name": "threading.Thread.__init__", "line_number": 22, "usage_type": "call" }, { "api_name": "threading.Thread", "line_number": 22, "usage_type": "attribute" }, { "api_name": "random.choice", "line_number": 31, "usage_type": "call" }, { "api_name": "scipy.ndimage.imread", "line_number": 33, "usage_type": "call" }, { "api_name": "scipy.ndimage", "line_number": 33, "usage_type": "name" }, { "api_name": "time.sleep", "line_number": 35, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.subplots", "line_number": 47, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.pause", "line_number": 51, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.draw", "line_number": 57, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.pause", "line_number": 58, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name" }, { "api_name": "time.sleep", "line_number": 59, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.close", "line_number": 61, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name" } ]
596805772
import os
import sys

lib_dir = os.path.expanduser('~/proj/ml_lib/')
sys.path.append(lib_dir)

import numpy as np
import matplotlib.pyplot as plt

from kernel.kernel_ridge import KernelRidge
from kernel.kernels import gaussian_kernel

N = 1000
sigma_noise = 0.1

X = np.linspace(0, 2 * np.pi, N).reshape(-1, 1)
y = np.sin(X)
y_noised = y + np.random.normal(0, sigma_noise, X.shape)

kr = KernelRidge()

sigma = 0.5
pen = 0.1

K = gaussian_kernel(X, sigma)
a = kr.fit(K, y_noised, pen)

y_predicted = K.dot(a)

plt.scatter(X, y_noised, marker='x', c='red')
plt.plot(X, y_predicted)
plt.show()
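
# kernel/kernel_ridge.py is not part of this record; the fit above presumably
# uses the standard closed form. A self-contained sketch (an assumption, not
# the library's actual code):
def kernel_ridge_fit_sketch(K, y, pen):
    # alpha = (K + pen * I)^(-1) y  -- ridge-regularized kernel least squares,
    # so y_predicted = K @ alpha smooths the noisy targets.
    return np.linalg.solve(K + pen * np.eye(K.shape[0]), y)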
null
demo/kernel_ridge_demo.py
kernel_ridge_demo.py
py
592
python
en
code
null
code-starcoder2
83
[ { "api_name": "os.path.expanduser", "line_number": 4, "usage_type": "call" }, { "api_name": "os.path", "line_number": 4, "usage_type": "attribute" }, { "api_name": "sys.path.append", "line_number": 5, "usage_type": "call" }, { "api_name": "sys.path", "line_number": 5, "usage_type": "attribute" }, { "api_name": "numpy.linspace", "line_number": 15, "usage_type": "call" }, { "api_name": "numpy.pi", "line_number": 15, "usage_type": "attribute" }, { "api_name": "numpy.sin", "line_number": 16, "usage_type": "call" }, { "api_name": "numpy.random.normal", "line_number": 17, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 17, "usage_type": "attribute" }, { "api_name": "kernel.kernel_ridge.KernelRidge", "line_number": 19, "usage_type": "call" }, { "api_name": "kernel.kernels.gaussian_kernel", "line_number": 23, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.scatter", "line_number": 28, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.plot", "line_number": 29, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name" }, { "api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name" } ]
192561299
from flask import Flask, render_template, request
#from nocache import nocache
import ast, requests, boto3, os
from datetime import datetime
import csv
# import plots,copy,random,string

# access_key_id = 'ASIA37PBWIRNEDGEMZO2'
# secret_access_key = "+QT0v4mCGOxm7"
# session_token = '+dP/EIgDIcZgOUcuzlLHRY9glf+/SYI6CBvnEYPOtumiuqdCgHJZLUrYjZx0AsENG9BMgodHcFk8u/cSppfhzjYwWbGKzyBuNiWvpQrpNwVrpO+O+J3ORApG0/jnIv8ibN8oxqLa4QU='

app = Flask(__name__)
app.config['SECRET_KEY'] = '0f9dc56d2288afa6e1'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0

# types = {"Government" : 0, "Education" : 0, "Invalid URL" : 0, "Social Media" : 0,
#          "News" : 0, "Blog" : 0, "Commercial Health" : 0, "Fake News" : 0, "Scientific" : 0,
#          "Videos" : 0, "Commercial" : 0, "HealthMagazines" : 0, "HealthInsurance" : 0,
#          "NMPSocieties" : 0, "None Found" : 0}
#
# table_dict = {"vaccine" : (copy.deepcopy(types),copy.deepcopy(types),copy.deepcopy(types)),
#               "abortion" : (copy.deepcopy(types),copy.deepcopy(types),copy.deepcopy(types)),
#               "weed" : (copy.deepcopy(types),copy.deepcopy(types),copy.deepcopy(types)),
#               "ecig" : (copy.deepcopy(types),copy.deepcopy(types),copy.deepcopy(types)),
#               "aids" : (copy.deepcopy(types),copy.deepcopy(types),copy.deepcopy(types))}

region = 'us-east-2'  # must stay defined: the boto3.resource() call below needs it
# session = boto3.session.Session()
aws_secret = 'aXL3ndaT/BilMryekS'
aws_pub = 'AKIAIFL3OJZQZDFSJOQQ'
db = boto3.resource('dynamodb', aws_access_key_id=aws_pub, aws_secret_access_key=aws_secret, region_name=region)
# img_folder = '/home/trevorm4/mysite/static/img/'

# No caching at all for API endpoints.
# @app.after_request
# def add_header(response):
#     """
#     Add headers to both force latest IE rendering engine or Chrome Frame,
#     and also to cache the rendered page for 10 minutes.
#     """
#     response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
#     response.headers['Cache-Control'] = 'public, max-age=0'
#     return response
#
# """
# Uses Twitter oEmbed api to fetch the html code for embedding the tweet.
# Uses fix_twitter_html_response because the api escapes '/', even though it's not necessary, which
# messes up the code
#
# @param tweet_url : url of the tweet to fetch html code for
# @return html code to embed passed tweet
# """
# def get_embed_html(tweet_url):
#     r = requests.get('https://publish.twitter.com/oembed?url=' + tweet_url)
#     r = fix_malformed_dict_string(r.text)
#     return fix_twitter_html_response((ast.literal_eval(r)['html']))
#
# def fix_twitter_html_response(html):
#     new_string = ""
#     for i in range(len(html)):
#         if not (html[i] == "\\" and html[i:i+2] == '\\/'):
#             new_string += html[i]
#     return new_string
#
# """
# Some of the JSONs have false/true/null instead of False/True/None
# So this method just replaces all of false/true/null with False/True/None so ast.literal_eval can
# parse it extremely easily
# """
# def fix_malformed_dict_string(dict_string):
#     no_null = dict_string.replace('null', 'None')
#     no_false = no_null.replace('false', 'False')
#     no_true = no_false.replace('true', 'True')
#     return no_true
#
# def get_latest_tweets(table_name, num_tweets, topic):
#     table = db.Table(table_name)
#     response = table.scan()
#     tweets = []
#
#     for item in response['Items']:
#         if item['topic'] == topic.lower():
#             tweets.append(get_embed_html(item['TweetID']))
#     return tweets[:num_tweets]
#
# def update_counts(table_name, dictionary):
#     table = db.Table(table_name)
#
#     response = table.scan()
#
#     for item in response["Items"]:
#         category = item['topic']
#         url_type = item['type']
#         dictionary[category][0][url_type] += 1  # overall count
#         if item['user_type'] == 'Bot':
#             dictionary[category][2][url_type] += 1
#         else:
#             dictionary[category][1][url_type] += 1
#
# def update_plots(category):
#     update_counts('URLsTable', table_dict)
#     cat = category
#     plots.type_histogram_overall(table_dict[cat][0], True, category + '_PLOT_' + generate_random_string(10) + '.png')
#     plots.type_histogram_overall(table_dict[cat][2], True, category + '_PLOT_' + generate_random_string(10) + '_human_' + '.png')
#     plots.type_histogram_overall(table_dict[cat][2], True, category + '_PLOT_' + generate_random_string(10) + '_bot_' + '.png')
#
# def generate_random_string(n):
#     return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(n))
#
# def get_plot_html(category):
#     existing_files = [file for file in os.listdir(img_folder) if file.find(category + '_PLOT_') == 0]
#
#     for file in existing_files:
#         os.remove(os.path.join(img_folder, file))
#     update_plots(category)
#
#     files = os.listdir(img_folder)
#     files = [file for file in files if file.find(category + '_PLOT_') == 0]
#     html_blocks = []
#
#     for file in files:
#         print('<img src=\"' + img_folder + file + '\" alt=\"' + file[:file.find('.png')] + '\">')
#         html_blocks.append('<img src=\"' + '/static/img/' + file + '\" alt=\"' + file[:file.find('.png')] + '\">')
#     return html_blocks


# app route
@app.route('/', methods=['POST', 'GET'])
def dash():
    # with open('AllTweet.csv', encoding="utf8") as f:
    #     rd = csv.reader(f)
    #     for row in rd:
    #         data_list.append(row)

    # Get the full table
    table = db.Table('AllTweet')
    # With Scan get the full table data
    data = table.scan()
    # get all scanned items
    data_One = data['Items']

    # initialize the variables outside of the loop so the page also renders on
    # first load, before any search has produced a result
    result_list = []
    positive = 0
    negative = 0
    neutral = 0

    # get the search topic
    search_topic = ''
    if request.method == 'POST':
        search_topic = request.form['topic']
        for item in data_One:
            if item['topic'] == search_topic:
                result_list.append(item)

    # ===================================================
    # calculate the neutral, negative, positive numbers
    # ===================================================
    for item in result_list:
        if item['sentiment'] == "Neutral":
            neutral += 1
        if item['sentiment'] == "Negative":
            negative += 1
        if item['sentiment'] == "Positive":
            positive += 1

    # ==================================================
    # calculate the number of topics that happened each day
    # ==================================================
    all_day_list = []
    unique_day_list = []

    for item in result_list:
        all_day_list.append(item['created_at'].split()[0].split('-')[-1])

    for item in all_day_list:
        if item not in unique_day_list:
            unique_day_list.append(item)

    each_day_topic_list = []
    day_first = 0
    day_second = 0

    for item in result_list:
        if item['created_at'].split()[0].split('-')[-1] == unique_day_list[0]:
            day_first += 1
    if day_first != len(result_list):
        day_second = len(result_list) - day_first

    # ================================================================================
    # getting url support number for each search topic from the URLsTable table on AWS
    # ================================================================================
    # Get the full table
    url_table = db.Table('URLsTable')
    # With Scan get the full table data
    url_data = url_table.scan()
    # get all scanned items (fixed: this previously reread the AllTweet scan
    # via data['Items'] instead of url_data['Items'])
    url_data_One = url_data['Items']

    total_url_for_search_topic = []
    url_supported_link = 0
    url_non_supported_link = 0

    # getting all urls for the search topic
    for item in url_data_One:
        if item['topic'] == search_topic:
            for key, val in item.items():
                try:
                    if "/" in val:
                        total_url_for_search_topic.append(val)
                except TypeError:
                    # non-string attribute values cannot be substring-tested
                    pass

    # getting the total url number which contains the search topic word
    for item in total_url_for_search_topic:
        if search_topic in item:
            url_supported_link += 1

    # getting the non_supported_url number
    url_non_supported_link = len(total_url_for_search_topic) - url_supported_link

    # =================================================
    # Calculating total unique accounts for each topic
    # =================================================
    all_account_list = []
    unique_account_list = []
    total_account = 0
    topics_per_account = 0

    for item in result_list:
        all_account_list.append(item['username'])

    for item in all_account_list:
        if item not in unique_account_list:
            unique_account_list.append(item)

    total_account = len(unique_account_list)
    if result_list:
        topics_per_account = round((len(result_list) / total_account), 2)

    # ==============================================
    # Calculating total BOT accounts for each topic
    # ==============================================
    total_bot_account = 0
    for item in result_list:
        if item['user_type'] == 'Bot':
            total_bot_account += 1

    # ==============================================
    # Calculating % of true accounts for each topic
    # ==============================================
    true_account_percentage_full_decimal = 0
    true_account_percentage = 0
    if result_list:
        true_account_percentage_full_decimal = 100 - ((total_bot_account * 100) / len(result_list))
        true_account_percentage = round(true_account_percentage_full_decimal, 2)

    # ================================================================
    # Calculating url & non-url for each sentiment for the search topic
    # ================================================================
    negative_result_list = []
    positive_result_list = []
    neutral_result_list = []
    if result_list:
        for item in result_list:
            if item['sentiment'] == "Negative":
                negative_result_list.append(item['TweetID'])
            if item['sentiment'] == "Positive":
                positive_result_list.append(item['TweetID'])
            if item['sentiment'] == "Neutral":
                neutral_result_list.append(item['TweetID'])

    negative_url_result_list = []
    positive_url_result_list = []
    neutral_url_result_list = []

    negative_with_url = 0
    negative_with_out_url = 0
    positive_with_url = 0
    positive_with_out_url = 0
    neutral_with_url = 0
    neutral_with_out_url = 0

    for item in negative_result_list:
        for id in url_data_One:
            if item == id['TweetID']:
                negative_url_result_list.append(id)

    for item in positive_result_list:
        for id in url_data_One:
            if item == id['TweetID']:
                positive_url_result_list.append(id)

    for item in neutral_result_list:
        for id in url_data_One:
            if item == id['TweetID']:
                neutral_url_result_list.append(id)

    if search_topic:
        for item in negative_url_result_list:
            for key, val in item.items():
                try:
                    if "/" in val:
                        if search_topic in val:
                            negative_with_url += 1
                except TypeError:
                    pass
        negative_with_out_url = len(negative_url_result_list) - negative_with_url

        for item in positive_url_result_list:
            for key, val in item.items():
                try:
                    if "/" in val:
                        if search_topic in val:
                            positive_with_url += 1
                except TypeError:
                    pass
        positive_with_out_url = len(positive_url_result_list) - positive_with_url

        for item in neutral_url_result_list:
            for key, val in item.items():
                try:
                    if "/" in val:
                        if search_topic in val:
                            neutral_with_url += 1
                except TypeError:
                    pass
        neutral_with_out_url = len(neutral_url_result_list) - neutral_with_url

    return render_template('dashboard.html', result_list=result_list, positive=positive,
                           negative=negative, neutral=neutral, search_topic=search_topic,
                           unique_day_list=unique_day_list, url_supported_link=url_supported_link,
                           url_non_supported_link=url_non_supported_link, day_first=day_first,
                           day_second=day_second, total_account=total_account,
                           topics_per_account=topics_per_account, total_bot_account=total_bot_account,
                           true_account_percentage=true_account_percentage,
                           negative_with_url=negative_with_url, negative_with_out_url=negative_with_out_url,
                           positive_with_url=positive_with_url, positive_with_out_url=positive_with_out_url,
                           neutral_with_url=neutral_with_url, neutral_with_out_url=neutral_with_out_url)


@app.route('/analyze')
def analyze():
    # NOTE: relies on get_latest_tweets(), which is commented out above.
    return render_template('dashboard.html', tweets=get_latest_tweets('AllTweet', 15, 'aids'))


@app.route('/about')
def about():
    return render_template('about.html')


@app.route('/graph/')
def graph():
    # table = db.Table('AllTweet')
    # tabdata = table.creation_date_time
    # # num_of_item = table.item_count
    # data = table.scan()
    # data_One = data['Items']
    # result_list = []
    # topic_list = []
    # unique_topic_list = []
    # created_at_column_data = []
    #
    # # for item in data_One:
    # #     if item['topic'] == 'weed':
    # #         result_list.append(item)
    #     topic_list.append(item['topic'])
    #     created_at_column_data.append(item['created_at'])
    #
    # # getting the unique topic list
    # for item in topic_list:
    #     if item not in unique_topic_list:
    #         unique_topic_list.append(item)

    table = db.Table('URLsTable')
    data = table.scan()
    data_One = data['Items']
    # num_of_item = len(result_list)

    return render_template('graph.html', data_One=data_One)


# @app.route('/vaccines')
# def vaccines():
#     #tweetss = get_latest_tweets('tweets_by_ID',15)
#     graphs = get_plot_html("vaccine")
#     return render_template('vaccines.html', charts=graphs)
#
# @app.route('/abortion')
# def abortion():
#     #tweetss = get_latest_tweets('abortion_tweets_by_ID',15)
#     graphs = get_plot_html("abortion")
#     return render_template('abortion.html', charts=graphs)
#
# @app.route('/marijuana')
# def weed():
#     #tweetss = get_latest_tweets('weed_tweets_by_ID',15)
#     graphs = get_plot_html('weed')
#     return render_template('weed.html', charts=graphs)
#
# @app.route('/aids')
# def aids():
#     #tweetss = get_latest_tweets('aids_tweets_by_ID',15)
#     graphs = get_plot_html('aids')
#     return render_template('aids.html', charts=graphs)
#
# @app.route('/ecigs')
# def ecigs():
#     #tweetss = get_latest_tweets('ecig_tweets_by_ID',15)
#     graphs = get_plot_html('ecig')
#     return render_template('ecigs.html', charts=graphs)


if __name__ == "__main__":
    app.run(debug=True)
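
# A possible refinement (sketch, not part of the original app): DynamoDB can
# filter during the scan instead of pulling the whole table and filtering in
# Python, e.g.
#     from boto3.dynamodb.conditions import Attr
#     data = table.scan(FilterExpression=Attr('topic').eq(search_topic))
#     result_list = data['Items']
# Note that scan() paginates, so a complete version would also follow
# LastEvaluatedKey until the scan is exhausted.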
null
app.py
app.py
py
14,965
python
en
code
null
code-starcoder2
83
[ { "api_name": "flask.Flask", "line_number": 11, "usage_type": "call" }, { "api_name": "boto3.resource", "line_number": 31, "usage_type": "call" }, { "api_name": "flask.request.method", "line_number": 153, "usage_type": "attribute" }, { "api_name": "flask.request", "line_number": 153, "usage_type": "name" }, { "api_name": "flask.request.form", "line_number": 154, "usage_type": "attribute" }, { "api_name": "flask.request", "line_number": 154, "usage_type": "name" }, { "api_name": "flask.render_template", "line_number": 341, "usage_type": "call" }, { "api_name": "flask.render_template", "line_number": 350, "usage_type": "call" }, { "api_name": "flask.render_template", "line_number": 355, "usage_type": "call" }, { "api_name": "flask.render_template", "line_number": 388, "usage_type": "call" } ]
179634356
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import pickle
from sklearn.model_selection import train_test_split
import dataProcess as dp
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.externals import joblib

cspace = 'YCrCb'
spatial_size = (16, 16)
hist_bins = 16
orient = 9
pix_per_cell = 8
cell_per_block = 1
hog_channel = 'ALL'
spatial_feat = True
hist_feat = True
hog_feat = True

cars, notCars = dp.dataRead('../data/allData.p')

carFeature = dp.extract_features(cars, cspace=cspace, bin_size=spatial_size, hist_bins=hist_bins,
                                 orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                                 hog_channel=hog_channel, binFeat=spatial_feat, histFeat=hist_feat,
                                 hogFeat=hog_feat)
# print(carFeature)

notCarFeature = dp.extract_features(notCars, cspace=cspace, bin_size=spatial_size, hist_bins=hist_bins,
                                    orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                                    hog_channel=hog_channel, binFeat=spatial_feat, histFeat=hist_feat,
                                    hogFeat=hog_feat)
# print(notCarFeature)

X = np.vstack((carFeature, notCarFeature)).astype(np.float64)
X_scaler = StandardScaler().fit(X)
scaled_X = X_scaler.transform(X)

y = np.hstack((np.ones(len(carFeature)), np.zeros(len(notCarFeature))))

rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.2, random_state=rand_state)

parameter = dict(color_space=cspace, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient,
                 pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel,
                 spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)

svc = LinearSVC()
svc.fit(X_train, y_train)
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))

joblib.dump({'model': svc, 'config': parameter, 'scaling': X_scaler}, '../model/svmCar.pkl')
print('save model success')
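
# Re-loading the artifact for inference (a sketch; 'new_features' is a
# placeholder for rows produced by dp.extract_features with the same
# parameters saved under 'config'):
#     bundle = joblib.load('../model/svmCar.pkl')
#     svc, scaler = bundle['model'], bundle['scaling']
#     predictions = svc.predict(scaler.transform(new_features))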
null
code/training.py
training.py
py
2,033
python
en
code
null
code-starcoder2
83
[ { "api_name": "dataProcess.dataRead", "line_number": 23, "usage_type": "call" }, { "api_name": "dataProcess.extract_features", "line_number": 25, "usage_type": "call" }, { "api_name": "dataProcess.extract_features", "line_number": 30, "usage_type": "call" }, { "api_name": "numpy.vstack", "line_number": 35, "usage_type": "call" }, { "api_name": "numpy.float64", "line_number": 35, "usage_type": "attribute" }, { "api_name": "sklearn.preprocessing.StandardScaler", "line_number": 36, "usage_type": "call" }, { "api_name": "numpy.hstack", "line_number": 38, "usage_type": "call" }, { "api_name": "numpy.ones", "line_number": 38, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 38, "usage_type": "call" }, { "api_name": "numpy.random.randint", "line_number": 40, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 40, "usage_type": "attribute" }, { "api_name": "sklearn.model_selection.train_test_split", "line_number": 41, "usage_type": "call" }, { "api_name": "sklearn.svm.LinearSVC", "line_number": 49, "usage_type": "call" }, { "api_name": "sklearn.externals.joblib.dump", "line_number": 53, "usage_type": "call" }, { "api_name": "sklearn.externals.joblib", "line_number": 53, "usage_type": "name" } ]
186711611
from django import forms

from contactos.models import Pozo, Pertenece, Area, Atiende, Calificacion
from contactos.models import Nota, Recordatorio, Llamada
from contactos.models import NumeroTelefonico, TipoNumeroTelefonico
from empresas.models import Cliente
from principal.models import Vendedor


class PozoForm(forms.ModelForm):
    nombre = forms.CharField(max_length=35, help_text='Nombre: ',
                             required=True, widget=forms.TextInput(attrs={'class': 'form-control'}))
    ubicacion = forms.CharField(max_length=35, help_text='Ubicacion: ',
                                required=True, widget=forms.TextInput(attrs={'class': 'form-control'}))
    empresa = forms.ModelChoiceField(queryset=Cliente.objects.all(),
                                     help_text='Cliente: ', required=True,
                                     widget=forms.Select(attrs={'class': 'form-control'}))

    class Meta:
        model = Pozo
        # 'empresa' matches the field declared above (was 'cliente', which is
        # not a field of this form)
        fields = ('nombre', 'ubicacion', 'empresa',)


class LlamadaForm(forms.ModelForm):
    contacto = forms.ModelChoiceField(queryset=Pozo.objects.all(), help_text='Pozo: ',
                                      required=True, widget=forms.Select(attrs={'class': 'form-control'}))
    descripcion = forms.CharField(help_text='Descripción: ',
                                  required=True, widget=forms.Textarea(attrs={'class': 'form-control'}))

    class Meta:
        model = Llamada  # was Pozo; this form creates Llamada records
        fields = ('contacto', 'descripcion',)


class NotaForm(forms.ModelForm):
    contacto = forms.ModelChoiceField(queryset=Pozo.objects.all(), help_text='Pozo: ',
                                      required=True, widget=forms.Select(attrs={'class': 'form-control'}))
    descripcion = forms.CharField(help_text='Descripción: ',
                                  required=True, widget=forms.Textarea(attrs={'class': 'form-control'}))
    clasificacion = forms.ChoiceField(help_text='Clasificación: ', choices=[(x, x) for x in range(1, 4)],
                                      widget=forms.Select(attrs={'class': 'form-control'}))

    class Meta:
        model = Nota  # was Pozo; this form creates Nota records
        fields = ('contacto', 'descripcion', 'clasificacion',)


class RecordatorioForm(forms.ModelForm):
    contacto = forms.ModelChoiceField(queryset=Pozo.objects.all(), help_text='Pozo: ',
                                      required=True, widget=forms.Select(attrs={'class': 'form-control'}))
    descripcion = forms.CharField(help_text='Descripción: ',
                                  required=True, widget=forms.Textarea(attrs={'class': 'form-control'}))
    urgencia = forms.ChoiceField(help_text='Urgencia: ', choices=[(x, x) for x in range(1, 4)],
                                 widget=forms.Select(attrs={'class': 'form-control'}))
    fecha = forms.DateField(help_text='Fecha y hora: ',
                            widget=forms.DateTimeInput(attrs={'class': 'form-control datepicker'}))

    class Meta:
        model = Recordatorio
        fields = ('contacto', 'descripcion', 'urgencia', 'fecha',)


class AtiendeForm(forms.ModelForm):
    vendedor = forms.ModelChoiceField(queryset=Vendedor.objects.all(), help_text='Vendedor: ',
                                      required=True, widget=forms.Select(attrs={'class': 'form-control'}))

    class Meta:
        model = Atiende
        fields = ('vendedor',)


class EditarPozoForm(forms.ModelForm):
    nombre = forms.CharField(max_length=35, help_text='Nombre: ',
                             required=True, widget=forms.TextInput(attrs={'class': 'form-control'}))
    apellido = forms.CharField(max_length=35, help_text='Apellido: ',
                               required=True, widget=forms.TextInput(attrs={'class': 'form-control'}))
    empresa = forms.ModelChoiceField(queryset=Cliente.objects.all(),
                                     help_text='Cliente: ', required=True,
                                     widget=forms.Select(attrs={'class': 'form-control'}))
    area = forms.ModelChoiceField(queryset=Area.objects.all(), help_text='Area: ',
                                  required=True, widget=forms.Select(attrs={'class': 'form-control'}))
    calificacion = forms.ModelChoiceField(help_text='Calificación: ',
                                          queryset=Calificacion.objects.all(), required=True,
                                          widget=forms.Select(attrs={'class': 'form-control'}))
    is_cliente = forms.BooleanField(help_text='Cliente: ', required=False,
                                    widget=forms.CheckboxInput(attrs={'class': 'form-control'}))

    class Meta:
        model = Pozo
        fields = ('nombre', 'apellido', 'empresa', 'area',
                  'calificacion', 'is_cliente',)
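
# Typical view-side usage (a sketch; the view and template names are
# illustrative, not part of this module):
#     from django.shortcuts import render, redirect
#
#     def nueva_llamada(request):
#         form = LlamadaForm(request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             form.save()
#             return redirect('contactos:index')
#         return render(request, 'contactos/llamada_form.html', {'form': form})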
null
claand/contactos/forms.py
forms.py
py
4,140
python
en
code
null
code-starcoder2
83
[ { "api_name": "django.forms.ModelForm", "line_number": 8, "usage_type": "attribute" }, { "api_name": "django.forms", "line_number": 8, "usage_type": "name" }, { "api_name": "django.forms.CharField", "line_number": 9, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 9, "usage_type": "name" }, { "api_name": "django.forms.TextInput", "line_number": 10, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 10, "usage_type": "name" }, { "api_name": "django.forms.CharField", "line_number": 11, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 11, "usage_type": "name" }, { "api_name": "django.forms.TextInput", "line_number": 12, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 12, "usage_type": "name" }, { "api_name": "django.forms.ModelChoiceField", "line_number": 13, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 13, "usage_type": "name" }, { "api_name": "empresas.models.Cliente.objects.all", "line_number": 13, "usage_type": "call" }, { "api_name": "empresas.models.Cliente.objects", "line_number": 13, "usage_type": "attribute" }, { "api_name": "empresas.models.Cliente", "line_number": 13, "usage_type": "name" }, { "api_name": "django.forms.Select", "line_number": 14, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 14, "usage_type": "name" }, { "api_name": "contactos.models.Pozo", "line_number": 17, "usage_type": "name" }, { "api_name": "django.forms.ModelForm", "line_number": 20, "usage_type": "attribute" }, { "api_name": "django.forms", "line_number": 20, "usage_type": "name" }, { "api_name": "django.forms.ModelChoiceField", "line_number": 21, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 21, "usage_type": "name" }, { "api_name": "contactos.models.Pozo.objects.all", "line_number": 21, "usage_type": "call" }, { "api_name": "contactos.models.Pozo.objects", "line_number": 21, "usage_type": "attribute" }, { "api_name": "contactos.models.Pozo", "line_number": 21, "usage_type": "name" }, { "api_name": "django.forms.Select", "line_number": 22, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 22, "usage_type": "name" }, { "api_name": "django.forms.CharField", "line_number": 23, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 23, "usage_type": "name" }, { "api_name": "django.forms.Textarea", "line_number": 24, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 24, "usage_type": "name" }, { "api_name": "contactos.models.Pozo", "line_number": 27, "usage_type": "name" }, { "api_name": "django.forms.ModelForm", "line_number": 30, "usage_type": "attribute" }, { "api_name": "django.forms", "line_number": 30, "usage_type": "name" }, { "api_name": "django.forms.ModelChoiceField", "line_number": 31, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 31, "usage_type": "name" }, { "api_name": "contactos.models.Pozo.objects.all", "line_number": 31, "usage_type": "call" }, { "api_name": "contactos.models.Pozo.objects", "line_number": 31, "usage_type": "attribute" }, { "api_name": "contactos.models.Pozo", "line_number": 31, "usage_type": "name" }, { "api_name": "django.forms.Select", "line_number": 32, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 32, "usage_type": "name" }, { "api_name": "django.forms.CharField", "line_number": 33, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 33, "usage_type": "name" }, { "api_name": "django.forms.Textarea", 
"line_number": 34, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 34, "usage_type": "name" }, { "api_name": "django.forms.ChoiceField", "line_number": 35, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 35, "usage_type": "name" }, { "api_name": "django.forms.Select", "line_number": 36, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 36, "usage_type": "name" }, { "api_name": "contactos.models.Pozo", "line_number": 39, "usage_type": "name" }, { "api_name": "django.forms.ModelForm", "line_number": 42, "usage_type": "attribute" }, { "api_name": "django.forms", "line_number": 42, "usage_type": "name" }, { "api_name": "django.forms.ModelChoiceField", "line_number": 43, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 43, "usage_type": "name" }, { "api_name": "contactos.models.Pozo.objects.all", "line_number": 43, "usage_type": "call" }, { "api_name": "contactos.models.Pozo.objects", "line_number": 43, "usage_type": "attribute" }, { "api_name": "contactos.models.Pozo", "line_number": 43, "usage_type": "name" }, { "api_name": "django.forms.Select", "line_number": 44, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 44, "usage_type": "name" }, { "api_name": "django.forms.CharField", "line_number": 45, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 45, "usage_type": "name" }, { "api_name": "django.forms.Textarea", "line_number": 46, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 46, "usage_type": "name" }, { "api_name": "django.forms.ChoiceField", "line_number": 47, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 47, "usage_type": "name" }, { "api_name": "django.forms.Select", "line_number": 48, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 48, "usage_type": "name" }, { "api_name": "django.forms.DateField", "line_number": 49, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 49, "usage_type": "name" }, { "api_name": "django.forms.DateTimeInput", "line_number": 50, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 50, "usage_type": "name" }, { "api_name": "contactos.models.Recordatorio", "line_number": 53, "usage_type": "name" }, { "api_name": "django.forms.ModelForm", "line_number": 56, "usage_type": "attribute" }, { "api_name": "django.forms", "line_number": 56, "usage_type": "name" }, { "api_name": "django.forms.ModelChoiceField", "line_number": 57, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 57, "usage_type": "name" }, { "api_name": "principal.models.Vendedor.objects.all", "line_number": 57, "usage_type": "call" }, { "api_name": "principal.models.Vendedor.objects", "line_number": 57, "usage_type": "attribute" }, { "api_name": "principal.models.Vendedor", "line_number": 57, "usage_type": "name" }, { "api_name": "django.forms.Select", "line_number": 58, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 58, "usage_type": "name" }, { "api_name": "contactos.models.Atiende", "line_number": 61, "usage_type": "name" }, { "api_name": "django.forms.ModelForm", "line_number": 64, "usage_type": "attribute" }, { "api_name": "django.forms", "line_number": 64, "usage_type": "name" }, { "api_name": "django.forms.CharField", "line_number": 65, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 65, "usage_type": "name" }, { "api_name": "django.forms.TextInput", "line_number": 66, "usage_type": "call" }, { "api_name": 
"django.forms", "line_number": 66, "usage_type": "name" }, { "api_name": "django.forms.CharField", "line_number": 67, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 67, "usage_type": "name" }, { "api_name": "django.forms.TextInput", "line_number": 68, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 68, "usage_type": "name" }, { "api_name": "django.forms.ModelChoiceField", "line_number": 69, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 69, "usage_type": "name" }, { "api_name": "empresas.models.Cliente.objects.all", "line_number": 69, "usage_type": "call" }, { "api_name": "empresas.models.Cliente.objects", "line_number": 69, "usage_type": "attribute" }, { "api_name": "empresas.models.Cliente", "line_number": 69, "usage_type": "name" }, { "api_name": "django.forms.Select", "line_number": 70, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 70, "usage_type": "name" }, { "api_name": "django.forms.ModelChoiceField", "line_number": 71, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 71, "usage_type": "name" }, { "api_name": "contactos.models.Area.objects.all", "line_number": 71, "usage_type": "call" }, { "api_name": "contactos.models.Area.objects", "line_number": 71, "usage_type": "attribute" }, { "api_name": "contactos.models.Area", "line_number": 71, "usage_type": "name" }, { "api_name": "django.forms.Select", "line_number": 72, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 72, "usage_type": "name" }, { "api_name": "django.forms.ModelChoiceField", "line_number": 73, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 73, "usage_type": "name" }, { "api_name": "contactos.models.Calificacion.objects.all", "line_number": 74, "usage_type": "call" }, { "api_name": "contactos.models.Calificacion.objects", "line_number": 74, "usage_type": "attribute" }, { "api_name": "contactos.models.Calificacion", "line_number": 74, "usage_type": "name" }, { "api_name": "django.forms.Select", "line_number": 75, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 75, "usage_type": "name" }, { "api_name": "django.forms.BooleanField", "line_number": 76, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 76, "usage_type": "name" }, { "api_name": "django.forms.CheckboxInput", "line_number": 77, "usage_type": "call" }, { "api_name": "django.forms", "line_number": 77, "usage_type": "name" }, { "api_name": "contactos.models.Pozo", "line_number": 80, "usage_type": "name" } ]
566790456
import os
import numpy as np
import pandas as pd
import pickle
import quandl
import time
from datetime import datetime

import plotly.offline as py
import plotly.graph_objs as go
import plotly.figure_factory as ff


def get_quandl_data(quandl_id):
    '''Download and cache Quandl dataseries'''
    cache_path = '{}.pkl'.format(quandl_id).replace('/', '-')
    try:
        with open(cache_path, 'rb') as f:
            df = pickle.load(f)
        print('Loaded {} from cache'.format(quandl_id))
    except (OSError, IOError):
        print('Download {} from Quandl'.format(quandl_id))
        df = quandl.get(quandl_id, returns='pandas')
        df.to_pickle(cache_path)
        print('Cached {} at {}'.format(quandl_id, cache_path))
    return df


btc_usd_price_kraken = get_quandl_data('BCHARTS/KRAKENUSD')
# print(btc_usd_price_kraken.head())
# btc_trace = go.Scatter(x=btc_usd_price_kraken.index, y=btc_usd_price_kraken['Weighted Price'])
# py.plot([btc_trace])

exchanges = ['COINBASE', 'BITSTAMP', 'ITBIT']
exchange_data = {}
exchange_data['KRAKEN'] = btc_usd_price_kraken
for exchange in exchanges:
    exchange_code = 'BCHARTS/{}USD'.format(exchange)
    btc_exchange_df = get_quandl_data(exchange_code)
    exchange_data[exchange] = btc_exchange_df


def merge_dfs_on_column(dataframes, labels, col):
    '''Merge a single column of each dataframe into a new combined dataframe'''
    series_dict = {}
    for index in range(len(dataframes)):
        series_dict[labels[index]] = dataframes[index][col]
    return pd.DataFrame(series_dict)


btc_usd_datasets = merge_dfs_on_column(list(exchange_data.values()), list(exchange_data.keys()), 'Weighted Price')
# print(btc_usd_datasets.tail())


def df_scatter(df, title, separate_y_axis=False, y_axis_label='', scale='linear', initial_hide=False):
    '''Generate a scatter plot of the entire dataframe'''
    label_arr = list(df)
    series_arr = list(map(lambda col: df[col], label_arr))
    layout = go.Layout(title=title,
                       legend=dict(orientation='h'),
                       xaxis=dict(type='date'),
                       yaxis=dict(title=y_axis_label,
                                  showticklabels=not separate_y_axis,
                                  type=scale))
    y_axis_config = dict(overlaying='y', showticklabels=False, type=scale)

    visibility = 'visible'
    if initial_hide:
        visibility = 'legendonly'

    # Form one trace for each series
    trace_arr = []
    for index, series in enumerate(series_arr):
        trace = go.Scatter(x=series.index, y=series, name=label_arr[index], visible=visibility)
        # Add a separate axis for the series
        if separate_y_axis:
            trace['yaxis'] = 'y{}'.format(index + 1)
            layout['yaxis{}'.format(index + 1)] = y_axis_config
        trace_arr.append(trace)

    fig = go.Figure(data=trace_arr, layout=layout)
    py.plot(fig)


btc_usd_datasets.replace(0, np.nan, inplace=True)
# df_scatter(btc_usd_datasets, 'Bitcoin Price (USD) By Exchange')
btc_usd_datasets['avg_btc_price_usd'] = btc_usd_datasets.mean(axis=1)
# btc_trace = go.Scatter(x=btc_usd_datasets.index, y=btc_usd_datasets['avg_btc_price_usd'])
# py.plot([btc_trace])


def get_json_data(json_url, cache_path):
    try:
        with open(cache_path, 'rb') as f:
            df = pickle.load(f)
        print('Loaded {} from cache'.format(json_url))
    except (OSError, IOError):
        print('Downloading {}'.format(json_url))
        df = pd.read_json(json_url)
        df.to_pickle(cache_path)
        print('Cached {} at {}'.format(json_url, cache_path))
    return df


base_polo_url = 'https://poloniex.com/public?command=returnChartData&currencyPair={}&start={}&end={}&period={}'
start_date = datetime.strptime('2015-01-01', '%Y-%m-%d')  # get data from the start of 2015
end_date = datetime.now()  # up until today
period = 86400  # pull daily data (86,400 seconds per day)
base_coin_market_cap_url = 'https://api.coinmarketcap.com/v1/ticker/{}/?convert=USD'
# print(start_date.time())
# print(end_date.time())


def get_crypto_data(poloniex_pair):
    '''Retrieve cryptocurrency data from Poloniex'''
    json_url = base_polo_url.format(poloniex_pair, time.mktime(start_date.timetuple()), time.mktime(end_date.timetuple()), period)
    data_df = get_json_data(json_url, poloniex_pair)
    data_df = data_df.set_index('date')
    return data_df


altcoins = ['ETH', 'LTC', 'XRP', 'ETC', 'STR', 'DASH']
altcoin_data = {}
for altcoin in altcoins:
    coinpair = 'BTC_{}'.format(altcoin)
    crypto_price_df = get_crypto_data(coinpair)
    altcoin_data[altcoin] = crypto_price_df
# print(altcoin_data['ETH'].tail())

for altcoin in altcoin_data.keys():
    altcoin_data[altcoin]['price_usd'] = altcoin_data[altcoin]['weightedAverage'] * btc_usd_datasets['avg_btc_price_usd']

combined_df = merge_dfs_on_column(list(altcoin_data.values()), list(altcoin_data.keys()), 'price_usd')
# Add BTC price to the dataframe combined_df
combined_df['BTC'] = btc_usd_datasets['avg_btc_price_usd']
# Chart all of the altcoin prices
df_scatter(combined_df, 'Cryptocurrency Price (USD)', separate_y_axis=False, y_axis_label='Coin Value (USD)', scale='log')

combined_df_2016 = combined_df[combined_df.index.year == 2016]
# print(combined_df_2016.pct_change().corr(method='pearson'))


def correlation_heatmap(df, title, absolute_bounds=True):
    '''Plot a correlation heatmap for the entire dataframe'''
    # .values replaces the long-removed DataFrame.as_matrix()
    heatmap = go.Heatmap(z=df.corr(method='pearson').values,
                         x=df.columns,
                         y=df.columns,
                         colorbar=dict(title='Pearson Coefficient'))
    layout = go.Layout(title=title)
    if absolute_bounds:
        heatmap['zmax'] = 1.0
        heatmap['zmin'] = -1.0
    fig = go.Figure(data=[heatmap], layout=layout)
    py.plot(fig)


# correlation_heatmap(combined_df_2016.pct_change(), "Cryptocurrency Correlations in 2016")
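# Usage note (a sketch, not part of the pipeline above): get_quandl_data() and
# get_json_data() cache every series as a local .pkl file, so re-runs are cheap.
# Deleting a pickle forces a fresh download; the cache filename follows from the
# '/' -> '-' substitution in get_quandl_data, e.g.:
#
#     os.remove('BCHARTS-KRAKENUSD.pkl')         # invalidate the cache
#     df = get_quandl_data('BCHARTS/KRAKENUSD')  # re-downloads and re-caches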
null
DataScience/crypto-currency/PriceTracking.py
PriceTracking.py
py
5,637
python
en
code
null
code-starcoder2
83
[ { "api_name": "pickle.load", "line_number": 18, "usage_type": "call" }, { "api_name": "quandl.get", "line_number": 23, "usage_type": "call" }, { "api_name": "pandas.DataFrame", "line_number": 48, "usage_type": "call" }, { "api_name": "plotly.graph_objs.Layout", "line_number": 58, "usage_type": "call" }, { "api_name": "plotly.graph_objs", "line_number": 58, "usage_type": "name" }, { "api_name": "plotly.graph_objs.Scatter", "line_number": 64, "usage_type": "call" }, { "api_name": "plotly.graph_objs", "line_number": 64, "usage_type": "name" }, { "api_name": "plotly.graph_objs.Figure", "line_number": 69, "usage_type": "call" }, { "api_name": "plotly.graph_objs", "line_number": 69, "usage_type": "name" }, { "api_name": "plotly.offline.plot", "line_number": 70, "usage_type": "call" }, { "api_name": "plotly.offline", "line_number": 70, "usage_type": "name" }, { "api_name": "numpy.nan", "line_number": 72, "usage_type": "attribute" }, { "api_name": "pickle.load", "line_number": 82, "usage_type": "call" }, { "api_name": "pandas.read_json", "line_number": 86, "usage_type": "call" }, { "api_name": "datetime.datetime.strptime", "line_number": 93, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 93, "usage_type": "name" }, { "api_name": "datetime.datetime.now", "line_number": 94, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 94, "usage_type": "name" }, { "api_name": "time.mktime", "line_number": 105, "usage_type": "call" }, { "api_name": "plotly.graph_objs.Heatmap", "line_number": 133, "usage_type": "call" }, { "api_name": "plotly.graph_objs", "line_number": 133, "usage_type": "name" }, { "api_name": "plotly.graph_objs.Layout", "line_number": 137, "usage_type": "call" }, { "api_name": "plotly.graph_objs", "line_number": 137, "usage_type": "name" }, { "api_name": "plotly.graph_objs.Figure", "line_number": 140, "usage_type": "call" }, { "api_name": "plotly.graph_objs", "line_number": 140, "usage_type": "name" }, { "api_name": "plotly.offline.plot", "line_number": 141, "usage_type": "call" }, { "api_name": "plotly.offline", "line_number": 141, "usage_type": "name" } ]
100791602
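# ResNet-101 encoder + UPerNet decoder for semantic segmentation. The decoder's
# pyramid pooling is unrolled into fixed-size AvgPool2d layers and all
# interpolation targets are hard-coded for a 10x15 top-level feature map, so the
# graph avoids AdaptiveAvgPool2d and dynamic shapes; inputs are assumed to
# match those sizes.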
import torch
import torch.nn as nn
import torchvision
from . import resnet, resnext, mobilenet, hrnet
from lib.nn import SynchronizedBatchNorm2d
BatchNorm2d = SynchronizedBatchNorm2d
import math


class SegmentationModuleBase(nn.Module):
    def __init__(self):
        super(SegmentationModuleBase, self).__init__()

    def pixel_acc(self, pred, label):
        _, preds = torch.max(pred, dim=1)
        valid = (label >= 0).long()
        acc_sum = torch.sum(valid * (preds == label).long())
        pixel_sum = torch.sum(valid)
        acc = acc_sum.float() / (pixel_sum.float() + 1e-10)
        return acc


class SegmentationModule(SegmentationModuleBase):
    def __init__(self, net_enc, net_dec, crit, deep_sup_scale=None):
        super(SegmentationModule, self).__init__()
        self.encoder = net_enc
        self.decoder = net_dec
        self.crit = crit
        self.deep_sup_scale = deep_sup_scale

    def forward(self, feed_dict):
        x1, x2, x3, x4 = self.encoder(feed_dict)
        pred = self.decoder(x1, x2, x3, x4)
        return pred


class ModelBuilder:
    # custom weights initialization
    @staticmethod
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            nn.init.kaiming_normal_(m.weight.data)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.fill_(1.)
            m.bias.data.fill_(1e-4)
        # elif classname.find('Linear') != -1:
        #     m.weight.data.normal_(0.0, 0.0001)

    @staticmethod
    def build_encoder(arch='resnet50dilated', fc_dim=512, weights=''):
        pretrained = True if len(weights) == 0 else False
        orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
        net_encoder = Resnet(orig_resnet)

        # encoders are usually pretrained
        # net_encoder.apply(ModelBuilder.weights_init)
        if len(weights) > 0:
            print('Loading weights for net_encoder')
            net_encoder.load_state_dict(
                torch.load(weights, map_location=lambda storage, loc: storage),
                strict=False)
        return net_encoder

    @staticmethod
    def build_decoder(arch='ppm_deepsup', fc_dim=512, num_class=150,
                      weights='', use_softmax=False):
        net_decoder = UPerNet(
            num_class=num_class,
            fc_dim=fc_dim,
            use_softmax=use_softmax,
            fpn_dim=512)
        net_decoder.apply(ModelBuilder.weights_init)
        if len(weights) > 0:
            print('Loading weights for net_decoder')
            net_decoder.load_state_dict(
                torch.load(weights, map_location=lambda storage, loc: storage),
                strict=False)
        return net_decoder


def conv3x3_bn_relu(in_planes, out_planes, stride=1):
    "3x3 convolution + BN + relu"
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes,
                  kernel_size=3, stride=stride, padding=1, bias=False),
        BatchNorm2d(out_planes),
        nn.ReLU(inplace=True),
    )


class Resnet(nn.Module):
    def __init__(self, orig_resnet):
        super(Resnet, self).__init__()

        # take pretrained resnet, except AvgPool and FC
        self.conv1 = orig_resnet.conv1
        self.bn1 = orig_resnet.bn1
        self.relu1 = orig_resnet.relu1
        self.conv2 = orig_resnet.conv2
        self.bn2 = orig_resnet.bn2
        self.relu2 = orig_resnet.relu2
        self.conv3 = orig_resnet.conv3
        self.bn3 = orig_resnet.bn3
        self.relu3 = orig_resnet.relu3
        self.maxpool = orig_resnet.maxpool
        self.layer1 = orig_resnet.layer1
        self.layer2 = orig_resnet.layer2
        self.layer3 = orig_resnet.layer3
        self.layer4 = orig_resnet.layer4

    def forward(self, x):
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)

        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)
        return (x1, x2, x3, x4)


# pyramid pooling
class PPM(nn.Module):
    def __init__(self, num_class=150, fc_dim=4096,
                 use_softmax=False, pool_scales=(1, 2, 3, 6)):
        super(PPM, self).__init__()
        self.use_softmax = use_softmax

        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                BatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(self.ppm)

        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim + len(pool_scales) * 512, 512,
                      kernel_size=3, padding=1, bias=False),
            BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_class, kernel_size=1)
        )

    def forward(self, conv_out, segSize=None):
        conv5 = conv_out[-1]

        input_size = conv5.size()
        ppm_out = [conv5]
        for pool_scale in self.ppm:
            ppm_out.append(nn.functional.interpolate(
                pool_scale(conv5),
                (input_size[2], input_size[3]),
                mode='bilinear', align_corners=False))
        ppm_out = torch.cat(ppm_out, 1)

        x = self.conv_last(ppm_out)

        if self.use_softmax:  # is True during inference
            x = nn.functional.interpolate(
                x, size=segSize, mode='bilinear', align_corners=False)
            x = nn.functional.softmax(x, dim=1)
        else:
            x = nn.functional.log_softmax(x, dim=1)
        return x


# upernet
class UPerNet(nn.Module):
    def __init__(self, num_class=150, fc_dim=4096,
                 use_softmax=False, pool_scales=(1, 2, 3, 6),
                 fpn_inplanes=(256, 512, 1024, 2048), fpn_dim=256):
        super(UPerNet, self).__init__()
        self.use_softmax = use_softmax

        # PPM Module: each fixed AvgPool2d below reproduces the output grid of
        # nn.AdaptiveAvgPool2d(scale) for a 10x15 input feature map.
        # 1x1
        H = 10
        W = 15
        O_H = 1
        O_W = 1
        stride_H = math.floor(H / O_H)
        stride_W = math.floor(W / O_W)  # width stride follows from W
        kernel_H = H - (O_H - 1) * stride_H
        kernel_W = W - (O_W - 1) * stride_W
        self.ppm_pooling_1 = nn.AvgPool2d(kernel_size=(kernel_H, kernel_W),
                                          stride=(stride_H, stride_W), padding=0)

        # 2x2
        H = 10
        W = 15
        O_H = 2
        O_W = 2
        stride_H = math.floor(H / O_H)
        stride_W = math.floor(W / O_W)
        kernel_H = H - (O_H - 1) * stride_H
        kernel_W = W - (O_W - 1) * stride_W
        self.ppm_pooling_2 = nn.AvgPool2d(kernel_size=(kernel_H, kernel_W),
                                          stride=(stride_H, stride_W), padding=0)

        # 3x3
        H = 10
        W = 15
        O_H = 3
        O_W = 3
        stride_H = math.floor(H / O_H)
        stride_W = math.floor(W / O_W)
        kernel_H = H - (O_H - 1) * stride_H
        kernel_W = W - (O_W - 1) * stride_W
        self.ppm_pooling_3 = nn.AvgPool2d(kernel_size=(kernel_H, kernel_W),
                                          stride=(stride_H, stride_W), padding=0)

        # 6x6
        H = 10
        W = 15
        O_H = 6
        O_W = 6
        stride_H = math.floor(H / O_H)
        stride_W = math.floor(W / O_W)
        kernel_H = H - (O_H - 1) * stride_H
        kernel_W = W - (O_W - 1) * stride_W
        self.ppm_pooling_4 = nn.AvgPool2d(kernel_size=(kernel_H, kernel_W),
                                          stride=(stride_H, stride_W), padding=0)

        # self.ppm_conv = [
        #     nn.Sequential(nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), BatchNorm2d(512), nn.ReLU(inplace=True)),
        #     nn.Sequential(nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), BatchNorm2d(512), nn.ReLU(inplace=True)),
        #     nn.Sequential(nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), BatchNorm2d(512), nn.ReLU(inplace=True)),
        #     nn.Sequential(nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), BatchNorm2d(512), nn.ReLU(inplace=True))
        # ]
        self.ppm_conv_1 = nn.Sequential(nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), BatchNorm2d(512), nn.ReLU(inplace=True))
        self.ppm_conv_2 = nn.Sequential(nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), BatchNorm2d(512), nn.ReLU(inplace=True))
        self.ppm_conv_3 = nn.Sequential(nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), BatchNorm2d(512), nn.ReLU(inplace=True))
        self.ppm_conv_4 = nn.Sequential(nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), BatchNorm2d(512), nn.ReLU(inplace=True))

        # for scale in pool_scales:
        #     self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
        #     self.ppm_conv.append(nn.Sequential(
        #         nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
        #         BatchNorm2d(512),
        #         nn.ReLU(inplace=True)
        #     ))
        # self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
        # self.ppm_conv = nn.ModuleList(self.ppm_conv)
        self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales) * 512, fpn_dim, 1)

        # FPN Module
        self.fpn_in = []
        for fpn_inplane in fpn_inplanes[:-1]:  # skip the top layer
            self.fpn_in.append(nn.Sequential(
                nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
                BatchNorm2d(fpn_dim),
                nn.ReLU(inplace=True)
            ))
        self.fpn_in = nn.ModuleList(self.fpn_in)

        self.fpn_out = []
        for i in range(len(fpn_inplanes) - 1):  # skip the top layer
            self.fpn_out.append(nn.Sequential(
                conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
            ))
        self.fpn_out = nn.ModuleList(self.fpn_out)

        self.conv_last = nn.Sequential(
            conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
            nn.Conv2d(fpn_dim, num_class, kernel_size=1)
        )

    # def forward(self, conv_out, segSize=None):
    #     conv5 = conv_out[-1]
    #     input_size = conv5.size()
    #     ppm_out = [conv5]
    #     for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
    #         ppm_out.append(pool_conv(nn.functional.interpolate(
    #             pool_scale(conv5),
    #             (input_size[2], input_size[3]),
    #             mode='bilinear', align_corners=False)))
    #     ppm_out = torch.cat(ppm_out, 1)
    #     f = self.ppm_last_conv(ppm_out)
    #
    #     fpn_feature_list = [f]
    #     for i in reversed(range(len(conv_out) - 1)):
    #         conv_x = conv_out[i]
    #         conv_x = self.fpn_in[i](conv_x)  # lateral branch
    #         f = nn.functional.interpolate(
    #             f, size=conv_x.size()[2:], mode='bilinear', align_corners=False)  # top-down branch
    #         f = conv_x + f
    #         fpn_feature_list.append(self.fpn_out[i](f))
    #
    #     fpn_feature_list.reverse()  # [P2 - P5]
    #     output_size = fpn_feature_list[0].size()[2:]
    #     fusion_list = [fpn_feature_list[0]]
    #     for i in range(1, len(fpn_feature_list)):
    #         fusion_list.append(nn.functional.interpolate(
    #             fpn_feature_list[i],
    #             output_size,
    #             mode='bilinear', align_corners=False))
    #     fusion_out = torch.cat(fusion_list, 1)
    #     x = self.conv_last(fusion_out)
    #
    #     if self.use_softmax:  # is True during inference
    #         x = nn.functional.interpolate(
    #             x, size=segSize, mode='bilinear', align_corners=False)
    #         x = nn.functional.softmax(x, dim=1)
    #         return x
    #
    #     x = nn.functional.log_softmax(x, dim=1)
    #     return x

    def forward(self, x1, x2, x3, x4):
        pp3 = self.ppm_conv_1(nn.functional.interpolate(self.ppm_pooling_1(x4), (10, 15), mode='bilinear', align_corners=False))
        pp2 = self.ppm_conv_2(nn.functional.interpolate(self.ppm_pooling_2(x4), (10, 15), mode='bilinear', align_corners=False))
        pp1 = self.ppm_conv_3(nn.functional.interpolate(self.ppm_pooling_3(x4), (10, 15), mode='bilinear', align_corners=False))
        pp0 = self.ppm_conv_4(nn.functional.interpolate(self.ppm_pooling_4(x4), (10, 15), mode='bilinear', align_corners=False))

        ppm_out = torch.cat([x4, pp3, pp2, pp1, pp0], 1)
        p4 = self.ppm_last_conv(ppm_out)

        conv_x3 = self.fpn_in[2](x3)
        f = nn.functional.interpolate(p4, size=(20, 30), mode='bilinear', align_corners=False)
        f = conv_x3 + f
        p3 = self.fpn_out[2](f)

        conv_x2 = self.fpn_in[1](x2)
        f = nn.functional.interpolate(f, size=(40, 60), mode='bilinear', align_corners=False)
        f = conv_x2 + f
        p2 = self.fpn_out[1](f)

        conv_x1 = self.fpn_in[0](x1)
        f = nn.functional.interpolate(f, size=(80, 120), mode='bilinear', align_corners=False)
        f = conv_x1 + f
        p1 = self.fpn_out[0](f)

        cat_2 = nn.functional.interpolate(p2, (80, 120), mode='bilinear', align_corners=False)
        cat_3 = nn.functional.interpolate(p3, (80, 120), mode='bilinear', align_corners=False)
        cat_4 = nn.functional.interpolate(p4, (80, 120), mode='bilinear', align_corners=False)

        fusion_out = torch.cat((p1, cat_2, cat_3, cat_4), 1)
        x = self.conv_last(fusion_out)
        x = nn.functional.interpolate(x, size=(512, 768), mode='bilinear', align_corners=False)
        x = nn.functional.softmax(x, dim=1)
        print(x.shape)
        return x
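# Rough smoke-test sketch (assumptions: the pretrained ResNet-101 weights can
# be downloaded, and fc_dim must be 2048 to match the encoder's layer-4 output,
# not the builder's default of 512):
#
#     net = SegmentationModule(ModelBuilder.build_encoder(),
#                              ModelBuilder.build_decoder(fc_dim=2048, num_class=150),
#                              crit=None)
#     with torch.no_grad():
#         out = net(torch.randn(1, 3, 320, 480))  # layer-4 map is 10x15 -> out (1, 150, 512, 768)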
null
models/models.py
models.py
py
13,391
python
en
code
null
code-starcoder2
83
[ { "api_name": "lib.nn.SynchronizedBatchNorm2d", "line_number": 6, "usage_type": "name" }, { "api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 9, "usage_type": "name" }, { "api_name": "torch.max", "line_number": 14, "usage_type": "call" }, { "api_name": "torch.sum", "line_number": 16, "usage_type": "call" }, { "api_name": "torch.sum", "line_number": 17, "usage_type": "call" }, { "api_name": "torch.nn.init.kaiming_normal_", "line_number": 42, "usage_type": "call" }, { "api_name": "torch.nn.init", "line_number": 42, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 42, "usage_type": "name" }, { "api_name": "torch.load", "line_number": 60, "usage_type": "call" }, { "api_name": "torch.load", "line_number": 78, "usage_type": "call" }, { "api_name": "torch.nn.Sequential", "line_number": 84, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 84, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 85, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 85, "usage_type": "name" }, { "api_name": "torch.nn.ReLU", "line_number": 88, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 88, "usage_type": "name" }, { "api_name": "torch.nn.Module", "line_number": 92, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 92, "usage_type": "name" }, { "api_name": "torch.nn.Module", "line_number": 126, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 126, "usage_type": "name" }, { "api_name": "torch.nn.Sequential", "line_number": 134, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 134, "usage_type": "name" }, { "api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 135, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 135, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 136, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 136, "usage_type": "name" }, { "api_name": "torch.nn.ReLU", "line_number": 138, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 138, "usage_type": "name" }, { "api_name": "torch.nn.ModuleList", "line_number": 140, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 140, "usage_type": "name" }, { "api_name": "torch.nn.Sequential", "line_number": 142, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 142, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 143, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 143, "usage_type": "name" }, { "api_name": "torch.nn.ReLU", "line_number": 146, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 146, "usage_type": "name" }, { "api_name": "torch.nn.Dropout2d", "line_number": 147, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 147, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 148, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 148, "usage_type": "name" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 157, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 157, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 157, "usage_type": "name" }, { "api_name": "torch.cat", "line_number": 161, "usage_type": "call" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 166, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 166, "usage_type": 
"attribute" }, { "api_name": "torch.nn", "line_number": 166, "usage_type": "name" }, { "api_name": "torch.nn.functional.softmax", "line_number": 168, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 168, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 168, "usage_type": "name" }, { "api_name": "torch.nn.functional.log_softmax", "line_number": 170, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 170, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 170, "usage_type": "name" }, { "api_name": "torch.nn.Module", "line_number": 174, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 174, "usage_type": "name" }, { "api_name": "math.floor", "line_number": 187, "usage_type": "call" }, { "api_name": "math.floor", "line_number": 188, "usage_type": "call" }, { "api_name": "torch.nn.AvgPool2d", "line_number": 191, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 191, "usage_type": "name" }, { "api_name": "math.floor", "line_number": 197, "usage_type": "call" }, { "api_name": "math.floor", "line_number": 198, "usage_type": "call" }, { "api_name": "torch.nn.AvgPool2d", "line_number": 201, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 201, "usage_type": "name" }, { "api_name": "math.floor", "line_number": 207, "usage_type": "call" }, { "api_name": "math.floor", "line_number": 208, "usage_type": "call" }, { "api_name": "torch.nn.AvgPool2d", "line_number": 211, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 211, "usage_type": "name" }, { "api_name": "math.floor", "line_number": 217, "usage_type": "call" }, { "api_name": "math.floor", "line_number": 218, "usage_type": "call" }, { "api_name": "torch.nn.AvgPool2d", "line_number": 221, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 221, "usage_type": "name" }, { "api_name": "torch.nn.Sequential", "line_number": 229, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 229, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 229, "usage_type": "call" }, { "api_name": "torch.nn.ReLU", "line_number": 229, "usage_type": "call" }, { "api_name": "torch.nn.Sequential", "line_number": 230, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 230, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 230, "usage_type": "call" }, { "api_name": "torch.nn.ReLU", "line_number": 230, "usage_type": "call" }, { "api_name": "torch.nn.Sequential", "line_number": 231, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 231, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 231, "usage_type": "call" }, { "api_name": "torch.nn.ReLU", "line_number": 231, "usage_type": "call" }, { "api_name": "torch.nn.Sequential", "line_number": 232, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 232, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 232, "usage_type": "call" }, { "api_name": "torch.nn.ReLU", "line_number": 232, "usage_type": "call" }, { "api_name": "torch.nn.Sequential", "line_number": 249, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 249, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 250, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 250, "usage_type": "name" }, { "api_name": "torch.nn.ReLU", "line_number": 252, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 252, "usage_type": 
"name" }, { "api_name": "torch.nn.ModuleList", "line_number": 254, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 254, "usage_type": "name" }, { "api_name": "torch.nn.Sequential", "line_number": 258, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 258, "usage_type": "name" }, { "api_name": "torch.nn.ModuleList", "line_number": 261, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 261, "usage_type": "name" }, { "api_name": "torch.nn.Sequential", "line_number": 263, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 263, "usage_type": "name" }, { "api_name": "torch.nn.Conv2d", "line_number": 265, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 265, "usage_type": "name" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 315, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 315, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 315, "usage_type": "name" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 316, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 316, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 316, "usage_type": "name" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 317, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 317, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 317, "usage_type": "name" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 318, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 318, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 318, "usage_type": "name" }, { "api_name": "torch.cat", "line_number": 320, "usage_type": "call" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 324, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 324, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 324, "usage_type": "name" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 329, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 329, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 329, "usage_type": "name" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 334, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 334, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 334, "usage_type": "name" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 338, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 338, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 338, "usage_type": "name" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 339, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 339, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 339, "usage_type": "name" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 340, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 340, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 340, "usage_type": "name" }, { "api_name": "torch.cat", "line_number": 342, "usage_type": "call" }, { "api_name": "torch.nn.functional.interpolate", "line_number": 344, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 344, 
"usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 344, "usage_type": "name" }, { "api_name": "torch.nn.functional.softmax", "line_number": 345, "usage_type": "call" }, { "api_name": "torch.nn.functional", "line_number": 345, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 345, "usage_type": "name" } ]
199128348
import numpy as np

from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD

from load import DataLoader
from scipy.stats import itemfreq

loader = DataLoader()
loader.load_train()
X_train = loader.images_train
Y_train = loader.labels_train

mean = np.mean(X_train, axis=0)
X_train -= mean
X_train = X_train.swapaxes(3, 1)

batch_size = 32
nb_classes = 5

model = Sequential()

model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(32, 32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(64 * 8 * 8, 512))
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Dense(512, nb_classes))
model.add(Activation('softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=30)

X_valid = loader.images_valid
X_valid -= mean
X_valid = X_valid.swapaxes(3, 1)
Y_valid = loader.labels_valid
model.evaluate(X_valid, Y_valid, batch_size, show_accuracy=True)

# # predicting
# loader.load_test()
# X_test = loader.images_test
# pred = model.predict_classes(X_test)
#
# print itemfreq(pred)
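# Note: this script targets the pre-1.0 Keras API, where Convolution2D takes
# (nb_filter, stack_size, nb_row, nb_col) and Dense takes explicit input and
# output dims. A rough modern (Keras 2) equivalent of the first conv block
# would be (input shape inferred from the 64*8*8 flatten size, so assumed):
#
#     from keras.layers import Conv2D
#     model.add(Conv2D(32, (3, 3), padding='same', activation='relu',
#                      input_shape=(3, 32, 32)))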
null
keras_cnn.py
keras_cnn.py
py
1,640
python
en
code
null
code-starcoder2
83
[ { "api_name": "load.DataLoader", "line_number": 10, "usage_type": "call" }, { "api_name": "numpy.mean", "line_number": 15, "usage_type": "call" }, { "api_name": "keras.models.Sequential", "line_number": 22, "usage_type": "call" }, { "api_name": "keras.layers.convolutional.Convolution2D", "line_number": 24, "usage_type": "call" }, { "api_name": "keras.layers.core.Activation", "line_number": 25, "usage_type": "call" }, { "api_name": "keras.layers.convolutional.Convolution2D", "line_number": 26, "usage_type": "call" }, { "api_name": "keras.layers.core.Activation", "line_number": 27, "usage_type": "call" }, { "api_name": "keras.layers.convolutional.MaxPooling2D", "line_number": 28, "usage_type": "call" }, { "api_name": "keras.layers.core.Dropout", "line_number": 29, "usage_type": "call" }, { "api_name": "keras.layers.convolutional.Convolution2D", "line_number": 31, "usage_type": "call" }, { "api_name": "keras.layers.core.Activation", "line_number": 32, "usage_type": "call" }, { "api_name": "keras.layers.convolutional.Convolution2D", "line_number": 33, "usage_type": "call" }, { "api_name": "keras.layers.core.Activation", "line_number": 34, "usage_type": "call" }, { "api_name": "keras.layers.convolutional.MaxPooling2D", "line_number": 35, "usage_type": "call" }, { "api_name": "keras.layers.core.Dropout", "line_number": 36, "usage_type": "call" }, { "api_name": "keras.layers.core.Flatten", "line_number": 38, "usage_type": "call" }, { "api_name": "keras.layers.core.Dense", "line_number": 39, "usage_type": "call" }, { "api_name": "keras.layers.core.Activation", "line_number": 40, "usage_type": "call" }, { "api_name": "keras.layers.core.Dropout", "line_number": 41, "usage_type": "call" }, { "api_name": "keras.layers.core.Dense", "line_number": 43, "usage_type": "call" }, { "api_name": "keras.layers.core.Activation", "line_number": 44, "usage_type": "call" }, { "api_name": "keras.optimizers.SGD", "line_number": 46, "usage_type": "call" } ]
257560395
from django.urls import path

from . import views

urlpatterns = [
    path('', views.principal, name='cliente'),
    path('crear_cliente/', views.crear),
    path('modificar_cliente/', views.modificar),
    path('borrar_cliente/', views.borrar),
    path('cuentas_cliente/', views.cuentas),
    # path() does not understand regex patterns, so the numeric captures use
    # the <int:...> converter syntax (use re_path for raw regexes)
    path('crear_cuenta/<int:dni>/', views.crear_cuentas, name='crear_cuentas'),
    path('deposito/<int:numero>/', views.depositar, name='deposito'),
    path('retiro/<int:numero>/', views.retirar, name='retiro'),
    path('transferencia/<int:numero>/', views.transferir, name='transferencia'),
    path('borrar_cuenta/<int:numero>/', views.borrar_cuenta, name='borrar_cuenta'),
]
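# Equivalent regex form, if regex routes are preferred:
#
#     from django.urls import re_path
#     re_path(r'^deposito/(?P<numero>\d+)/$', views.depositar, name='deposito')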
null
cooperativa2019/ape/modelo/urls.py
urls.py
py
693
python
en
code
null
code-starcoder2
83
[ { "api_name": "django.urls.path", "line_number": 7, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 8, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 9, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 10, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 11, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 12, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 13, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 14, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 15, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 16, "usage_type": "call" } ]
508325791
from discord.ext.commands import Bot as BotBase
from discord.ext import commands

import os

PRIFIX = "~"


class Bot(BotBase):
    def __init__(self):
        self.PRIFIX = PRIFIX
        super().__init__(command_prefix=PRIFIX)

    def setup(self):
        # load every .py file in lib/cogs as an extension
        for filename in os.listdir("./lib/cogs"):
            if filename.endswith('.py'):
                super().load_extension(f'lib.cogs.{filename[:-3]}')
                print(f'cogs.{filename} cog loaded')
        print("setup completed")

    def run(self):
        print("running setup")
        self.setup()

        with open("./lib/bot/token.txt", "r") as t:
            self.TOKEN = t.readlines()[0].strip()

        print("running bot...")
        super().run(self.TOKEN)

    async def on_connect(self):
        print("Bot connected")

    async def on_disconnect(self):
        print("Bot disconnected")

    async def on_ready(self):
        print("Hello I am ready")


bot = Bot()
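# Typical entry point (assuming a separate launcher module; the token is read
# from ./lib/bot/token.txt relative to the working directory, so run from the
# project root):
#
#     from lib.bot import bot
#     bot.run()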
null
lib/bot/__init__.py
__init__.py
py
974
python
en
code
null
code-starcoder2
83
[ { "api_name": "discord.ext.commands.Bot", "line_number": 7, "usage_type": "name" }, { "api_name": "os.listdir", "line_number": 14, "usage_type": "call" } ]
482993743
import itertools as it


def erat3():
    '''Incremental sieve of Eratosthenes with a mod-30 wheel.'''
    D = {9: 3, 25: 5}
    yield 2
    yield 3
    yield 5
    MASK = 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0,
    MODULOS = frozenset((1, 7, 11, 13, 17, 19, 23, 29))

    for q in it.compress(
            it.islice(it.count(7), 0, None, 2),
            it.cycle(MASK)):
        p = D.pop(q, None)
        if p is None:
            D[q * q] = q
            yield q
        else:
            x = q + 2 * p
            while x in D or (x % 30) not in MODULOS:
                x += 2 * p
            D[x] = p


def genprimelist(n):
    gen = erat3()
    primelist = []
    nextprime = next(gen)
    while nextprime < n:
        primelist.append(nextprime)
        nextprime = next(gen)
    return primelist


def only1379(primestring):
    evens = ["0", "2", "4", "5", "6", "8"]
    for char in primestring:
        if char in evens:
            return False
    return True


def solve35(n):
    circulars = set([])
    notcircular = set([])
    primelist = genprimelist(n)
    for prime in primelist:
        primestring = str(prime)
        length = len(primestring)
        # if it is 1 digit, it is circular by definition
        if length == 1:
            circulars.update([prime])
        else:
            if (prime in circulars) or (prime in notcircular):
                pass
            else:
                # Any digits other than 1, 3, 7, 9 will have a non-prime rotation
                if not only1379(primestring):
                    pass
                else:
                    # Generate all rotations of the prime
                    iterations = [prime]
                    nextiter = primestring[1:] + primestring[0]
                    while nextiter != primestring:
                        iterations.append(int(nextiter))
                        nextiter = nextiter[1:] + nextiter[0]
                    notprime = False
                    for rotation in iterations:
                        if rotation not in primelist:
                            notprime = True
                            break
                    # Move to the correct set; notcircular only contains
                    # values that passed the digit check
                    if notprime:
                        notcircular.update(iterations)
                    else:
                        circulars.update(iterations)
    return len(circulars)
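# Usage sketch for Project Euler problem 35 (circular primes below one million):
#
#     if __name__ == '__main__':
#         print(solve35(1000000))  # the published answer is 55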
null
Problems 31 - 40/problem 35/problem 35.py
problem 35.py
py
1,908
python
en
code
null
code-starcoder2
83
[ { "api_name": "itertools.compress", "line_number": 11, "usage_type": "call" }, { "api_name": "itertools.islice", "line_number": 12, "usage_type": "call" }, { "api_name": "itertools.count", "line_number": 12, "usage_type": "call" }, { "api_name": "itertools.cycle", "line_number": 13, "usage_type": "call" } ]
391603773
from flask import Blueprint, session, abort, request, current_app, jsonify
from flask_login import LoginManager, login_required, current_user, logout_user, login_user

from dunder_funk import db

login_manager = LoginManager()
login_manager.login_view = 'authentication.login'

auth_blueprint = Blueprint('authentication', __name__)


@login_manager.user_loader
def load_user(id):
    return db.get_client_by_id(int(id))


@auth_blueprint.errorhandler(400)
def auth400(error):
    # propagate the 400 status instead of the default 200
    return jsonify({'message': error.description}), 400


@auth_blueprint.route('/register', methods=['PUT'])
def do_registration():
    data = request.get_json()
    if not data.get('username'):
        abort(400, 'No username provided in client registration request')
    elif not data.get('password'):
        abort(400, 'No password provided in client registration request')
    elif not data.get('description'):
        abort(400, 'No player description provided')
    return jsonify(db.create_client(**data))


@auth_blueprint.route('/login', methods=['POST'])
def login():
    data = request.get_json()
    result = db.check_credentials(**data)
    if result is None:
        abort(401, 'Login failed')
    login_user(db.get_client_by_id(result['id']))
    return jsonify(result)


@auth_blueprint.route('/client/<int:client_id>/status', methods=['POST'])
@login_required
def client_status(client_id):  # the URL converter must be accepted as a parameter
    return jsonify(current_user.serialize)


@auth_blueprint.route('/logout', methods=['GET', 'POST'])
@login_required
def logout():
    logout_user()
    return jsonify({'message': 'Logged out'})


@auth_blueprint.route('/login', methods=['GET'])
def usage():
    response = jsonify({'usage': 'post username and password to /login'})
    response.status_code = 401
    return response


@auth_blueprint.route('/session', methods=['GET', 'POST'])
@login_required
def session_info():
    session_header = request.headers.get('Session-Token')
    if session_header is not None:
        stored_session = db.get_session(session_header)  # local name avoids shadowing flask.session
        if stored_session is None:
            abort(404, 'Unable to locate current session')
        return jsonify(stored_session)

    data = request.get_json()
    if not data:
        abort(400, 'Missing required data for session retrieval')
    auth = db.get_active_auth(data.get('client_id'))
    if auth is None:
        auth = db.authorize_client(data.get('client_id'))
    else:
        auth = auth.serialize
    if auth is None:
        abort(404, 'Unable to locate active client authentication')
    return jsonify(db.get_session_by_id(auth['session_id']).serialize)
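# Request sketch against these routes (host/port are placeholders, and the
# blueprint is assumed to be registered without a url_prefix):
#
#     curl -X PUT  http://localhost:5000/register \
#          -H 'Content-Type: application/json' \
#          -d '{"username": "ada", "password": "s3cret", "description": "pilot"}'
#     curl -X POST http://localhost:5000/login \
#          -H 'Content-Type: application/json' \
#          -d '{"username": "ada", "password": "s3cret"}'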
null
dunder_funk/server/routes/auth.py
auth.py
py
2,562
python
en
code
null
code-starcoder2
83
[ { "api_name": "flask_login.LoginManager", "line_number": 6, "usage_type": "call" }, { "api_name": "flask.Blueprint", "line_number": 9, "usage_type": "call" }, { "api_name": "dunder_funk.db.get_client_by_id", "line_number": 14, "usage_type": "call" }, { "api_name": "dunder_funk.db", "line_number": 14, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 19, "usage_type": "call" }, { "api_name": "flask.request.get_json", "line_number": 24, "usage_type": "call" }, { "api_name": "flask.request", "line_number": 24, "usage_type": "name" }, { "api_name": "flask.abort", "line_number": 26, "usage_type": "call" }, { "api_name": "flask.abort", "line_number": 28, "usage_type": "call" }, { "api_name": "flask.abort", "line_number": 30, "usage_type": "call" }, { "api_name": "flask.jsonify", "line_number": 31, "usage_type": "call" }, { "api_name": "dunder_funk.db.create_client", "line_number": 31, "usage_type": "call" }, { "api_name": "dunder_funk.db", "line_number": 31, "usage_type": "name" }, { "api_name": "flask.request.get_json", "line_number": 36, "usage_type": "call" }, { "api_name": "flask.request", "line_number": 36, "usage_type": "name" }, { "api_name": "dunder_funk.db.check_credentials", "line_number": 37, "usage_type": "call" }, { "api_name": "dunder_funk.db", "line_number": 37, "usage_type": "name" }, { "api_name": "flask.abort", "line_number": 39, "usage_type": "call" }, { "api_name": "flask_login.login_user", "line_number": 40, "usage_type": "call" }, { "api_name": "dunder_funk.db.get_client_by_id", "line_number": 40, "usage_type": "call" }, { "api_name": "dunder_funk.db", "line_number": 40, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 41, "usage_type": "call" }, { "api_name": "flask.jsonify", "line_number": 47, "usage_type": "call" }, { "api_name": "flask_login.current_user.serialize", "line_number": 47, "usage_type": "attribute" }, { "api_name": "flask_login.current_user", "line_number": 47, "usage_type": "name" }, { "api_name": "flask_login.login_required", "line_number": 45, "usage_type": "name" }, { "api_name": "flask_login.logout_user", "line_number": 53, "usage_type": "call" }, { "api_name": "flask.jsonify", "line_number": 54, "usage_type": "call" }, { "api_name": "flask_login.login_required", "line_number": 51, "usage_type": "name" }, { "api_name": "flask.jsonify", "line_number": 59, "usage_type": "call" }, { "api_name": "flask.request.headers.get", "line_number": 67, "usage_type": "call" }, { "api_name": "flask.request.headers", "line_number": 67, "usage_type": "attribute" }, { "api_name": "flask.request", "line_number": 67, "usage_type": "name" }, { "api_name": "flask.session", "line_number": 69, "usage_type": "name" }, { "api_name": "dunder_funk.db.get_session", "line_number": 69, "usage_type": "call" }, { "api_name": "dunder_funk.db", "line_number": 69, "usage_type": "name" }, { "api_name": "flask.session", "line_number": 70, "usage_type": "name" }, { "api_name": "flask.abort", "line_number": 71, "usage_type": "call" }, { "api_name": "flask.jsonify", "line_number": 72, "usage_type": "call" }, { "api_name": "flask.session", "line_number": 72, "usage_type": "argument" }, { "api_name": "flask.request.get_json", "line_number": 74, "usage_type": "call" }, { "api_name": "flask.request", "line_number": 74, "usage_type": "name" }, { "api_name": "flask.abort", "line_number": 76, "usage_type": "call" }, { "api_name": "dunder_funk.db.get_active_auth", "line_number": 77, "usage_type": "call" }, { "api_name": "dunder_funk.db", "line_number": 77, 
"usage_type": "name" }, { "api_name": "dunder_funk.db.authorize_client", "line_number": 79, "usage_type": "call" }, { "api_name": "dunder_funk.db", "line_number": 79, "usage_type": "name" }, { "api_name": "flask.abort", "line_number": 83, "usage_type": "call" }, { "api_name": "flask.jsonify", "line_number": 84, "usage_type": "call" }, { "api_name": "dunder_funk.db.get_session_by_id", "line_number": 84, "usage_type": "call" }, { "api_name": "dunder_funk.db", "line_number": 84, "usage_type": "name" }, { "api_name": "flask_login.login_required", "line_number": 65, "usage_type": "name" } ]
314833730
from rodan.models.workflow import Workflow
from rest_framework import serializers
from rodan.serializers.page import MinimalPageSerializer
from rodan.serializers.workflowjob import WorkflowJobSerializer
from rodan.serializers.workflowrun import WorkflowRunSerializer


class WorkflowSerializer(serializers.HyperlinkedModelSerializer):
    project = serializers.HyperlinkedRelatedField(view_name="project-detail")
    creator = serializers.HyperlinkedRelatedField(view_name="user-detail")
    pages = MinimalPageSerializer()
    # pages = serializers.HyperlinkedRelatedField(many=True, view_name="page-detail")
    workflow_jobs = WorkflowJobSerializer()
    workflow_runs = WorkflowRunSerializer()
    uuid = serializers.Field(source='uuid')
    # runs = serializers.IntegerField(required=False)

    class Meta:
        model = Workflow
        read_only_fields = ('created', 'updated', 'runs')
        fields = ("url",
                  "uuid",
                  "name",
                  "project",
                  "runs",
                  "pages",
                  "workflow_jobs",
                  "description",
                  "has_started",
                  "created",
                  "updated",
                  "workflow_runs")


class WorkflowListSerializer(serializers.HyperlinkedModelSerializer):
    uuid = serializers.Field(source='uuid')
    project = serializers.HyperlinkedRelatedField(view_name="project-detail")
    creator = serializers.HyperlinkedRelatedField(view_name="user-detail")

    class Meta:
        model = Workflow
        read_only_fields = ('created', 'updated', 'runs')
        fields = ('url', 'uuid', 'project', 'creator', 'name', 'created', 'updated')
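# Typical DRF usage (hyperlinked fields need the request in the serializer
# context, e.g. inside a view):
#
#     serializer = WorkflowListSerializer(
#         Workflow.objects.all(), many=True, context={'request': request})
#     data = serializer.data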
null
rodan/serializers/workflow.py
workflow.py
py
1,837
python
en
code
null
code-starcoder2
83
[ { "api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 8, "usage_type": "attribute" }, { "api_name": "rest_framework.serializers", "line_number": 8, "usage_type": "name" }, { "api_name": "rest_framework.serializers.HyperlinkedRelatedField", "line_number": 9, "usage_type": "call" }, { "api_name": "rest_framework.serializers", "line_number": 9, "usage_type": "name" }, { "api_name": "rest_framework.serializers.HyperlinkedRelatedField", "line_number": 10, "usage_type": "call" }, { "api_name": "rest_framework.serializers", "line_number": 10, "usage_type": "name" }, { "api_name": "rodan.serializers.page.MinimalPageSerializer", "line_number": 11, "usage_type": "call" }, { "api_name": "rodan.serializers.workflowjob.WorkflowJobSerializer", "line_number": 13, "usage_type": "call" }, { "api_name": "rodan.serializers.workflowrun.WorkflowRunSerializer", "line_number": 14, "usage_type": "call" }, { "api_name": "rest_framework.serializers.Field", "line_number": 15, "usage_type": "call" }, { "api_name": "rest_framework.serializers", "line_number": 15, "usage_type": "name" }, { "api_name": "rodan.models.workflow.Workflow", "line_number": 19, "usage_type": "name" }, { "api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 35, "usage_type": "attribute" }, { "api_name": "rest_framework.serializers", "line_number": 35, "usage_type": "name" }, { "api_name": "rest_framework.serializers.Field", "line_number": 36, "usage_type": "call" }, { "api_name": "rest_framework.serializers", "line_number": 36, "usage_type": "name" }, { "api_name": "rest_framework.serializers.HyperlinkedRelatedField", "line_number": 37, "usage_type": "call" }, { "api_name": "rest_framework.serializers", "line_number": 37, "usage_type": "name" }, { "api_name": "rest_framework.serializers.HyperlinkedRelatedField", "line_number": 38, "usage_type": "call" }, { "api_name": "rest_framework.serializers", "line_number": 38, "usage_type": "name" }, { "api_name": "rodan.models.workflow.Workflow", "line_number": 41, "usage_type": "name" } ]
18581393
import unittest

import torch
import numpy as np

from nl2prog.nn.nl2code import Predictor, ActionSequenceReader
from nl2prog.nn.utils import rnn


class TestPredictor(unittest.TestCase):
    def test_parameters(self):
        reader = ActionSequenceReader(1, 1, 1, 2, 3)
        predictor = Predictor(reader, 1, 2, 3, 5)
        self.assertEqual(17, len(dict(predictor.named_parameters())))

    def test_shape(self):
        reader = ActionSequenceReader(1, 1, 1, 1, 1)
        predictor = Predictor(reader, 1, 2, 3, 5)
        feature0 = torch.rand(2, 3)
        feature1 = torch.rand(1, 3)
        feature = rnn.pad_sequence([feature0, feature1])
        context0 = torch.rand(2, 2)
        context1 = torch.rand(1, 2)
        context = rnn.pad_sequence([context0, context1])
        query0 = torch.rand(3, 2)
        query1 = torch.rand(1, 2)
        query = rnn.pad_sequence([query0, query1])

        rule_pred, token_pred, copy_pred = predictor(
            query, (feature, context))
        self.assertTrue(np.array_equal(
            [[1, 1], [1, 0]], rule_pred.mask.numpy()))
        self.assertEqual((2, 2, 1), rule_pred.data.shape)
        self.assertTrue(np.allclose([[1, 1], [1, 1]], np.sum(
            rule_pred.data.detach().numpy(), axis=2)))
        self.assertEqual((2, 2, 1), token_pred.data.shape)
        self.assertTrue(np.array_equal(
            [[1, 1], [1, 0]], token_pred.mask.numpy()))
        self.assertEqual((2, 2, 3), copy_pred.data.shape)
        self.assertTrue(np.array_equal(
            [[1, 1], [1, 0]], copy_pred.mask.numpy()))
        self.assertTrue(np.allclose(
            [[1, 1], [1, 1]],
            np.sum(token_pred.data.detach().numpy(), axis=2) +
            np.sum(copy_pred.data.detach().numpy(), axis=2)))


if __name__ == "__main__":
    unittest.main()
null
test/nn/nl2code/test_predictor.py
test_predictor.py
py
1,932
python
en
code
null
code-starcoder2
83
[ { "api_name": "unittest.TestCase", "line_number": 9, "usage_type": "attribute" }, { "api_name": "nl2prog.nn.nl2code.ActionSequenceReader", "line_number": 11, "usage_type": "call" }, { "api_name": "nl2prog.nn.nl2code.Predictor", "line_number": 12, "usage_type": "call" }, { "api_name": "nl2prog.nn.nl2code.ActionSequenceReader", "line_number": 16, "usage_type": "call" }, { "api_name": "nl2prog.nn.nl2code.Predictor", "line_number": 17, "usage_type": "call" }, { "api_name": "torch.rand", "line_number": 18, "usage_type": "call" }, { "api_name": "torch.rand", "line_number": 19, "usage_type": "call" }, { "api_name": "nl2prog.nn.utils.rnn.pad_sequence", "line_number": 20, "usage_type": "call" }, { "api_name": "nl2prog.nn.utils.rnn", "line_number": 20, "usage_type": "name" }, { "api_name": "torch.rand", "line_number": 21, "usage_type": "call" }, { "api_name": "torch.rand", "line_number": 22, "usage_type": "call" }, { "api_name": "nl2prog.nn.utils.rnn.pad_sequence", "line_number": 23, "usage_type": "call" }, { "api_name": "nl2prog.nn.utils.rnn", "line_number": 23, "usage_type": "name" }, { "api_name": "torch.rand", "line_number": 24, "usage_type": "call" }, { "api_name": "torch.rand", "line_number": 25, "usage_type": "call" }, { "api_name": "nl2prog.nn.utils.rnn.pad_sequence", "line_number": 26, "usage_type": "call" }, { "api_name": "nl2prog.nn.utils.rnn", "line_number": 26, "usage_type": "name" }, { "api_name": "numpy.array_equal", "line_number": 30, "usage_type": "call" }, { "api_name": "numpy.allclose", "line_number": 33, "usage_type": "call" }, { "api_name": "numpy.sum", "line_number": 33, "usage_type": "call" }, { "api_name": "numpy.array_equal", "line_number": 36, "usage_type": "call" }, { "api_name": "numpy.array_equal", "line_number": 39, "usage_type": "call" }, { "api_name": "numpy.allclose", "line_number": 41, "usage_type": "call" }, { "api_name": "numpy.sum", "line_number": 42, "usage_type": "call" }, { "api_name": "numpy.sum", "line_number": 44, "usage_type": "call" }, { "api_name": "unittest.main", "line_number": 49, "usage_type": "call" } ]
34997065
# -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.http import TextResponse
from scrapy.loader import XPathItemLoader
from scrapy.loader.processors import TakeFirst
from scrapy.selector import HtmlXPathSelector
from scrapy.spiders import CrawlSpider
from scrapy.spiders import Rule

from yp.items import YpItem


class OrphanLoader(XPathItemLoader):
    default_output_processor = TakeFirst()


class MySpider(CrawlSpider):
    name = "yp"
    allowed_domains = ["qatcom.com"]
    start_urls = ["http://www.qatcom.com/listings/search?what=&searchtype=both"]

    rules = (
        Rule(LinkExtractor(allow=(), restrict_xpaths=("//a[text()='Next >']")), follow=True),
        Rule(LinkExtractor(allow=(), restrict_xpaths=("//div[@class='title']/a",)),
             callback="parse_item", follow=True),
    )
    # "/qatar-categories/[0-9]+/[0-9]+"
    # "//div[@class='col-sm-12 col-lg-12 ct-u-marginBottom40']/div/a", "//a[@aria-label='Next']",
    # "//div[@class='search_title']/a",
    # "/qatar-companies-profile-rev/[0-9]+/[A-z]+"

    def parse_item(self, response):
        hxs = HtmlXPathSelector(response)
        l = OrphanLoader(YpItem(), hxs)
        # hxs = HtmlXPathSelector(response)
        items = []
        item = YpItem()
        # item["Work_Email"] = ' '.join(''.join(hxs.select("//div[@class='form-group ']/input[@id='email']//@value").extract()).rsplit())
        item["Fax"] = ' '.join(''.join(hxs.select("//dd[@class='fax']/text()").extract()).replace('(00974)', '+974').replace('(+974)', '+974').rsplit())
        item["Work_Phone"] = ' '.join(''.join(hxs.select("//dd[@class='telephone']/text()").extract()).replace('(00974)', '+974').replace('(+974)', '+974').rsplit())
        item["Mobile"] = ' '.join(''.join(hxs.select("//dd[@class='mobile']/text()").extract()).replace('(00974)', '+974').replace('(+974)', '+974').rsplit())
        address = ' '.join(''.join(hxs.select("//dd[@class='location']/text()").extract()).rsplit())
        item["Industry"] = ' '.join(''.join(hxs.select("//dl[@class='category']/dd/text()").extract()).rsplit())
        item["Company_Type"] = 'Client'
        # note: a double slash selects the title anywhere in the document
        item["Company_Name"] = ' '.join(''.join(hxs.select("//div[@class='title']/a/text()").extract()).rsplit())
        if not address:
            pass
        else:
            item["Street"] = address
            item["Address"] = address
            # item["City"] = ''.join(address.rsplit(',')[-2])
        item['Zip'] = ''.join(''.join(hxs.select("//dd[@class='po_box']/text()").extract()).rsplit(',')[0])
        # item["Corporate_Website"] = ' '.join(''.join(hxs.select("//div[@class='category-logo']/a/@href").extract()).rsplit())
        item["Country"] = 'Qatar'
        if not item["Company_Name"]:
            pass
        else:
            items.append(item)
        return items
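# Run from the Scrapy project root (assuming this spider lives in the project's
# spiders package and yp.items defines YpItem):
#
#     scrapy crawl yp -o qatcom_listings.csv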
null
malaysia/Spiders/scrapy_Qatar2.py
scrapy_Qatar2.py
py
2,827
python
en
code
null
code-starcoder2
83
[ { "api_name": "scrapy.loader.XPathItemLoader", "line_number": 12, "usage_type": "name" }, { "api_name": "scrapy.loader.processors.TakeFirst", "line_number": 13, "usage_type": "call" }, { "api_name": "scrapy.spiders.CrawlSpider", "line_number": 15, "usage_type": "name" }, { "api_name": "scrapy.spiders.Rule", "line_number": 20, "usage_type": "call" }, { "api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 20, "usage_type": "call" }, { "api_name": "scrapy.spiders.Rule", "line_number": 21, "usage_type": "call" }, { "api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 21, "usage_type": "call" }, { "api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 28, "usage_type": "call" }, { "api_name": "yp.items.YpItem", "line_number": 29, "usage_type": "call" }, { "api_name": "yp.items.YpItem", "line_number": 32, "usage_type": "call" } ]
339893920
import requests

from projects.golem_api.pages.utils import url, headers

DELETE_SUITE_ENDPOINT = '/suite/delete'
DUPLICATE_SUITE_ENDPOINT = '/suite/duplicate'
RENAME_SUITE_ENDPOINT = '/suite/rename'
RUN_SUITE_ENDPOINT = '/suite/run'
SAVE_SUITE_ENDPOINT = '/suite/save'
RENAME_SUITE_DIRECTORY_ENDPOINT = '/suite/directory/rename'
DELETE_SUITE_DIRECTORY_ENDPOINT = '/suite/directory/delete'


def delete_suite(project_name, suite_name, user=None):
    return requests.delete(url(DELETE_SUITE_ENDPOINT), headers=headers(user),
                           json={'project': project_name, 'fullPath': suite_name})


def duplicate_suite(project_name, suite_name, new_suite_name, user=None):
    json_ = {
        'project': project_name,
        'fullPath': suite_name,
        'newFileFullPath': new_suite_name
    }
    return requests.post(url(DUPLICATE_SUITE_ENDPOINT), headers=headers(user), json=json_)


def rename_suite(project_name, suite_name, new_suite_name, user=None):
    json_ = {
        'project': project_name,
        'fullFilename': suite_name,
        'newFullFilename': new_suite_name
    }
    return requests.post(url(RENAME_SUITE_ENDPOINT), headers=headers(user), json=json_)


def run_suite(project_name, suite_name, user=None):
    return requests.post(url(RUN_SUITE_ENDPOINT), headers=headers(user),
                         json={'project': project_name, 'suite': suite_name})


def save_suite(project_name, suite_name, tests=None, processes=1, tags=None,
               browsers=None, environments=None, user=None):
    # None defaults avoid the shared-mutable-default pitfall
    json_ = {
        'project': project_name,
        'suite': suite_name,
        'tests': tests or [],
        'processes': processes,
        'tags': tags or [],
        'browsers': browsers or [],
        'environments': environments or []
    }
    return requests.put(url(SAVE_SUITE_ENDPOINT), headers=headers(user), json=json_)


def rename_suite_directory(project_name, dir_name, new_dir_name, user=None):
    json_ = {
        'project': project_name,
        'fullDirname': dir_name,
        'newFullDirname': new_dir_name
    }
    return requests.post(url(RENAME_SUITE_DIRECTORY_ENDPOINT), headers=headers(user), json=json_)


def delete_suite_directory(project_name, dir_name, user=None):
    return requests.delete(url(DELETE_SUITE_DIRECTORY_ENDPOINT), headers=headers(user),
                           json={'project': project_name, 'fullDirname': dir_name})
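# Usage sketch (assumes a running Golem API server reachable through the URL
# that projects.golem_api.pages.utils.url() builds, and a `user` object that
# headers() can turn into auth headers; `admin_user` is a placeholder):
#
#     resp = save_suite('my_project', 'smoke', tests=['login_test'],
#                       browsers=['chrome'], user=admin_user)
#     assert resp.status_code == 200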
null
projects/golem_api/pages/suite.py
suite.py
py
2,383
python
en
code
null
code-starcoder2
83
[ { "api_name": "requests.delete", "line_number": 16, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.url", "line_number": 16, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.headers", "line_number": 16, "usage_type": "call" }, { "api_name": "requests.post", "line_number": 26, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.url", "line_number": 26, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.headers", "line_number": 26, "usage_type": "call" }, { "api_name": "requests.post", "line_number": 35, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.url", "line_number": 35, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.headers", "line_number": 35, "usage_type": "call" }, { "api_name": "requests.post", "line_number": 39, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.url", "line_number": 39, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.headers", "line_number": 39, "usage_type": "call" }, { "api_name": "requests.put", "line_number": 54, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.url", "line_number": 54, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.headers", "line_number": 54, "usage_type": "call" }, { "api_name": "requests.post", "line_number": 63, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.url", "line_number": 63, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.headers", "line_number": 63, "usage_type": "call" }, { "api_name": "requests.delete", "line_number": 67, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.url", "line_number": 67, "usage_type": "call" }, { "api_name": "projects.golem_api.pages.utils.headers", "line_number": 67, "usage_type": "call" } ]
340182951
# coding: utf-8 from __future__ import absolute_import from datetime import date, datetime # noqa: F401 from typing import List, Dict # noqa: F401 from openapi_server.models.base_model_ import Model from openapi_server.models.currency_stats import CurrencyStats from openapi_server.models.stats_note import StatsNote from openapi_server.models.stats_tags_source import StatsTagsSource from openapi_server.models.stats_tool import StatsTool from openapi_server.models.stats_version import StatsVersion from openapi_server import util from openapi_server.models.currency_stats import CurrencyStats # noqa: E501 from openapi_server.models.stats_note import StatsNote # noqa: E501 from openapi_server.models.stats_tags_source import StatsTagsSource # noqa: E501 from openapi_server.models.stats_tool import StatsTool # noqa: E501 from openapi_server.models.stats_version import StatsVersion # noqa: E501 class Stats(Model): """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). Do not edit the class manually. """ def __init__(self, currencies=None, version=None, tools=None, tags_source=None, notes=None): # noqa: E501 """Stats - a model defined in OpenAPI :param currencies: The currencies of this Stats. # noqa: E501 :type currencies: List[CurrencyStats] :param version: The version of this Stats. # noqa: E501 :type version: StatsVersion :param tools: The tools of this Stats. # noqa: E501 :type tools: List[StatsTool] :param tags_source: The tags_source of this Stats. # noqa: E501 :type tags_source: StatsTagsSource :param notes: The notes of this Stats. # noqa: E501 :type notes: List[StatsNote] """ self.openapi_types = { 'currencies': List[CurrencyStats], 'version': StatsVersion, 'tools': List[StatsTool], 'tags_source': StatsTagsSource, 'notes': List[StatsNote] } self.attribute_map = { 'currencies': 'currencies', 'version': 'version', 'tools': 'tools', 'tags_source': 'tags_source', 'notes': 'notes' } self._currencies = currencies self._version = version self._tools = tools self._tags_source = tags_source self._notes = notes @classmethod def from_dict(cls, dikt) -> 'Stats': """Returns the dict as a model :param dikt: A dict. :type: dict :return: The stats of this Stats. # noqa: E501 :rtype: Stats """ return util.deserialize_model(dikt, cls) def to_dict(self, prefix=""): """Returns the model as a dict: :return: The Stats as a dict :rtype: dict """ return { 'currencies': self._currencies, 'version': self._version, 'tools': self._tools, 'tags_source': self._tags_source, 'notes': self._notes } @property def currencies(self): """Gets the currencies of this Stats. :return: The currencies of this Stats. :rtype: List[CurrencyStats] """ return self._currencies @currencies.setter def currencies(self, currencies): """Sets the currencies of this Stats. :param currencies: The currencies of this Stats. :type currencies: List[CurrencyStats] """ self._currencies = currencies @property def version(self): """Gets the version of this Stats. :return: The version of this Stats. :rtype: StatsVersion """ return self._version @version.setter def version(self, version): """Sets the version of this Stats. :param version: The version of this Stats. :type version: StatsVersion """ self._version = version @property def tools(self): """Gets the tools of this Stats. :return: The tools of this Stats. :rtype: List[StatsTool] """ return self._tools @tools.setter def tools(self, tools): """Sets the tools of this Stats. :param tools: The tools of this Stats. 
:type tools: List[StatsTool] """ self._tools = tools @property def tags_source(self): """Gets the tags_source of this Stats. :return: The tags_source of this Stats. :rtype: StatsTagsSource """ return self._tags_source @tags_source.setter def tags_source(self, tags_source): """Sets the tags_source of this Stats. :param tags_source: The tags_source of this Stats. :type tags_source: StatsTagsSource """ self._tags_source = tags_source @property def notes(self): """Gets the notes of this Stats. :return: The notes of this Stats. :rtype: List[StatsNote] """ return self._notes @notes.setter def notes(self, notes): """Sets the notes of this Stats. :param notes: The notes of this Stats. :type notes: List[StatsNote] """ self._notes = notes
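# --- Hedged usage sketch (not part of the generated file): a minimal
# --- round trip through from_dict()/to_dict() on this auto-generated
# --- model. The payload below is a hypothetical, empty example.
if __name__ == '__main__':
    payload = {'currencies': None, 'version': None, 'tools': None,
               'tags_source': None, 'notes': None}
    stats = Stats.from_dict(payload)   # deserializes via openapi_server.util
    print(stats.to_dict())             # serializes back to a plain dict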
null
openapi_server/models/stats.py
stats.py
py
5,245
python
en
code
null
code-starcoder2
83
[ { "api_name": "openapi_server.models.base_model_.Model", "line_number": 22, "usage_type": "name" }, { "api_name": "typing.List", "line_number": 43, "usage_type": "name" }, { "api_name": "openapi_server.models.currency_stats.CurrencyStats", "line_number": 43, "usage_type": "name" }, { "api_name": "openapi_server.models.stats_version.StatsVersion", "line_number": 44, "usage_type": "name" }, { "api_name": "typing.List", "line_number": 45, "usage_type": "name" }, { "api_name": "openapi_server.models.stats_tool.StatsTool", "line_number": 45, "usage_type": "name" }, { "api_name": "openapi_server.models.stats_tags_source.StatsTagsSource", "line_number": 46, "usage_type": "name" }, { "api_name": "typing.List", "line_number": 47, "usage_type": "name" }, { "api_name": "openapi_server.models.stats_note.StatsNote", "line_number": 47, "usage_type": "name" }, { "api_name": "openapi_server.util.deserialize_model", "line_number": 73, "usage_type": "call" }, { "api_name": "openapi_server.util", "line_number": 73, "usage_type": "name" } ]
532889033
#!/usr/bin/env python3 ''' The MIT License (MIT) Copyright (c) 2017, 2018 Erik Perillo <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' import numpy as np import random from config import train as conf random.seed(conf['rand_seed']) np.random.seed(conf['rand_seed'] + 1) import tensorflow as tf import subprocess as sp import os import sys import shutil import argparse from collections import OrderedDict import model import trloop import util def populate_out_dir(out_dir, train_set, val_set): ''' Populates output dir with info files. ''' #info file with open(os.path.join(out_dir, 'etc', 'train-log', 'info.txt'), 'w') as f: print('date created (y-m-d):', util.date_str(), file=f) print('time created:', util.time_str(), file=f) print('git commit hash:', util.git_hash(), file=f) #saving train/val filepaths with open(os.path.join(out_dir, 'input', 'train.csv'), 'w') as f: for path in train_set: print(path, file=f) with open(os.path.join(out_dir, 'input', 'val.csv'), 'w') as f: for path in val_set: print(path, file=f) def train(): #parsing possible command-line arguments parser = argparse.ArgumentParser() parser.add_argument('--output_dir_path', type=str, nargs='?', help='path to directory to save train data', default=conf['output_dir_path']) parser.add_argument('--pre_trained_model_path', type=str, nargs='?', help='path to pre-trained model', default=conf['pre_trained_model_path']) parser.add_argument('--train_set', type=str, nargs='?', help='path to csv list of train set paths', default=conf['train_set']) parser.add_argument('--val_set', type=str, nargs='?', help='path to csv list of validation set paths', default=conf['val_set']) args = parser.parse_args() #getting output_dir_path output_dir_path = args.output_dir_path #getting pre_trained_model_path pre_trained_model_path = args.pre_trained_model_path #getting train_set train_set = util.get_paths(args.train_set) #getting val val_set = util.get_paths(args.val_set) out_dir = util.mk_model_dir(output_dir_path) print('created out dir \'{}\', populating...'.format(out_dir), flush=True, end=' ') populate_out_dir(out_dir, train_set, val_set) print('done.') #meta-model meta_model_kwargs = dict(conf['meta_model_kwargs']) if 'rand_seed' not in meta_model_kwargs: meta_model_kwargs['rand_seed'] = conf['rand_seed'] + 2 meta_model = model.MetaModel(**meta_model_kwargs) #creating logging object log = util.Tee([sys.stdout, open(os.path.join(out_dir, 'etc', 'train-log', 'train.log'), 'w')]) #building graph if pre_trained_model_path is None: log.print('[info] building graph for the 
first time') graph = meta_model.build_graph() else: graph = tf.Graph() #tensorboard logging paths summ_dir = os.path.join(out_dir, 'etc', 'train-log', 'summaries') #training session with tf.Session(graph=graph) as sess: #if first time training, creates graph collections for model params #else, loads model weights and params from collections if pre_trained_model_path is None: sess.run( tf.group( tf.global_variables_initializer(), tf.local_variables_initializer())) meta_model.mk_params_colls(graph=graph) else: log.print('[info] loading graph/weights from \'{}\''.format( pre_trained_model_path)) model.load(sess, pre_trained_model_path) meta_model.set_params_from_colls(graph=graph) #building functions #train function: computes loss _train_fn = meta_model.get_train_fn(sess) def train_fn(x, y_true): return _train_fn(x, y_true, { meta_model.params['learning_rate']: conf['learning_rate'] }) #test function: returns a dict with pairs metric_name: metric_value _test_fn = meta_model.get_test_fn(sess) def test_fn(x, y_true): metrics_values = _test_fn(x, y_true) return OrderedDict(zip( meta_model.params['metrics'].keys(), metrics_values)) #save model function: given epoch and iter number, saves checkpoint def save_model_fn(epoch=None, it=None, name=None): if name is None: path = os.path.join(out_dir, 'self', 'ckpts', 'epoch-{}_it-{}'.format(epoch, it)) else: path = os.path.join(out_dir, 'self', 'ckpts', '{}'.format(name)) model.save(sess, path, overwrite=True) print(' saved checkpoint to \'{}\''.format(path)) #tensorboard logging and writers if conf['use_tensorboard']: #tensorboard summary writers train_writer = tf.summary.FileWriter( os.path.join(summ_dir, 'train'), graph=graph) val_writer = tf.summary.FileWriter( os.path.join(summ_dir, 'val'), graph=graph) #running tensorboard cmd = ['tensorboard', '--logdir={}'.format(summ_dir)] cmd.extend('--{}={}'.format(k, v) \ for k, v in conf['tensorboard_params'].items()) log.print('[info] running \'{}\''.format(' '.join(cmd))) proc = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE) _log_fn = meta_model.get_summary_fn(sess) def log_fn(x, y_true, its, train=True): summ = _log_fn(x, y_true) if train: train_writer.add_summary(summ, its) if its%10 == 0: train_writer.flush() else: val_writer.add_summary(summ, its) if its%10 == 0: val_writer.flush() else: log_fn = None #main train loop print('calling train loop') try: trloop.train_loop( train_set=train_set, train_fn=train_fn, n_epochs=conf['n_epochs'], val_set=val_set, val_fn=test_fn, val_every_its=conf['val_every_its'], patience=conf['patience'], log_every_its=conf['log_every_its'], log_fn=log_fn, save_model_fn=save_model_fn, save_every_its=conf['save_every_its'], batch_gen_kw=conf['batch_gen_kw'], log_batch_gen_kw=conf['log_batch_gen_kw'], better_loss_tol=conf['better_loss_tol'], verbose=conf['verbose'], print_fn=log.print, ) except KeyboardInterrupt: print('Keyboard Interrupt event.') finally: #closing tensorboard writers if conf['use_tensorboard']: train_writer.close() val_writer.close() #saving model on final state path = os.path.join(out_dir, 'self', 'ckpts', 'final') print('saving checkpoint to \'{}\'...'.format(path), flush=True) model.save(sess, path, overwrite=True) print('\ndone.', flush=True) def main(): train() if __name__ == '__main__': main()
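# --- Hedged usage sketch: a typical invocation of this script, assuming
# --- config.py supplies the remaining defaults (the paths below are
# --- hypothetical):
#   python train.py --output_dir_path out/ \
#       --train_set data/train.csv --val_set data/val.csv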
null
deeppeek-train-infer/code/train.py
train.py
py
8,439
python
en
code
null
code-starcoder2
83
[ { "api_name": "random.seed", "line_number": 31, "usage_type": "call" }, { "api_name": "config.train", "line_number": 31, "usage_type": "name" }, { "api_name": "numpy.random.seed", "line_number": 32, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 32, "usage_type": "attribute" }, { "api_name": "config.train", "line_number": 32, "usage_type": "name" }, { "api_name": "os.path.join", "line_number": 51, "usage_type": "call" }, { "api_name": "os.path", "line_number": 51, "usage_type": "attribute" }, { "api_name": "util.date_str", "line_number": 52, "usage_type": "call" }, { "api_name": "util.time_str", "line_number": 53, "usage_type": "call" }, { "api_name": "util.git_hash", "line_number": 54, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 57, "usage_type": "call" }, { "api_name": "os.path", "line_number": 57, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 61, "usage_type": "call" }, { "api_name": "os.path", "line_number": 61, "usage_type": "attribute" }, { "api_name": "argparse.ArgumentParser", "line_number": 67, "usage_type": "call" }, { "api_name": "config.train", "line_number": 70, "usage_type": "name" }, { "api_name": "config.train", "line_number": 73, "usage_type": "name" }, { "api_name": "config.train", "line_number": 76, "usage_type": "name" }, { "api_name": "config.train", "line_number": 79, "usage_type": "name" }, { "api_name": "util.get_paths", "line_number": 87, "usage_type": "call" }, { "api_name": "util.get_paths", "line_number": 89, "usage_type": "call" }, { "api_name": "util.mk_model_dir", "line_number": 91, "usage_type": "call" }, { "api_name": "config.train", "line_number": 98, "usage_type": "name" }, { "api_name": "config.train", "line_number": 100, "usage_type": "name" }, { "api_name": "model.MetaModel", "line_number": 101, "usage_type": "call" }, { "api_name": "util.Tee", "line_number": 104, "usage_type": "call" }, { "api_name": "sys.stdout", "line_number": 104, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 105, "usage_type": "call" }, { "api_name": "os.path", "line_number": 105, "usage_type": "attribute" }, { "api_name": "tensorflow.Graph", "line_number": 112, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 115, "usage_type": "call" }, { "api_name": "os.path", "line_number": 115, "usage_type": "attribute" }, { "api_name": "tensorflow.Session", "line_number": 118, "usage_type": "call" }, { "api_name": "tensorflow.group", "line_number": 123, "usage_type": "call" }, { "api_name": "tensorflow.global_variables_initializer", "line_number": 124, "usage_type": "call" }, { "api_name": "tensorflow.local_variables_initializer", "line_number": 125, "usage_type": "call" }, { "api_name": "model.load", "line_number": 130, "usage_type": "call" }, { "api_name": "config.train", "line_number": 138, "usage_type": "name" }, { "api_name": "collections.OrderedDict", "line_number": 145, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 151, "usage_type": "call" }, { "api_name": "os.path", "line_number": 151, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 154, "usage_type": "call" }, { "api_name": "os.path", "line_number": 154, "usage_type": "attribute" }, { "api_name": "model.save", "line_number": 155, "usage_type": "call" }, { "api_name": "config.train", "line_number": 159, "usage_type": "name" }, { "api_name": "tensorflow.summary.FileWriter", "line_number": 161, "usage_type": "call" }, { "api_name": "tensorflow.summary", 
"line_number": 161, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 162, "usage_type": "call" }, { "api_name": "os.path", "line_number": 162, "usage_type": "attribute" }, { "api_name": "tensorflow.summary.FileWriter", "line_number": 163, "usage_type": "call" }, { "api_name": "tensorflow.summary", "line_number": 163, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 164, "usage_type": "call" }, { "api_name": "os.path", "line_number": 164, "usage_type": "attribute" }, { "api_name": "config.train", "line_number": 168, "usage_type": "name" }, { "api_name": "subprocess.Popen", "line_number": 170, "usage_type": "call" }, { "api_name": "subprocess.PIPE", "line_number": 170, "usage_type": "attribute" }, { "api_name": "trloop.train_loop", "line_number": 189, "usage_type": "call" }, { "api_name": "config.train", "line_number": 192, "usage_type": "name" }, { "api_name": "config.train", "line_number": 195, "usage_type": "name" }, { "api_name": "config.train", "line_number": 196, "usage_type": "name" }, { "api_name": "config.train", "line_number": 197, "usage_type": "name" }, { "api_name": "config.train", "line_number": 200, "usage_type": "name" }, { "api_name": "config.train", "line_number": 201, "usage_type": "name" }, { "api_name": "config.train", "line_number": 202, "usage_type": "name" }, { "api_name": "config.train", "line_number": 203, "usage_type": "name" }, { "api_name": "config.train", "line_number": 204, "usage_type": "name" }, { "api_name": "config.train", "line_number": 211, "usage_type": "name" }, { "api_name": "os.path.join", "line_number": 216, "usage_type": "call" }, { "api_name": "os.path", "line_number": 216, "usage_type": "attribute" }, { "api_name": "model.save", "line_number": 218, "usage_type": "call" } ]
508799213
#!/usr/bin/env python # -*- coding:utf-8 -*- # Generate QR codes from PIL import Image import qrcode def create_qr_code(data, version=7, box_size=10, border=4): """ The version parameter sets the size of the generated QR code and ranges from 1 to 40; the smallest size, 1, produces a 21 * 21 QR code, and each increment of version adds 4 modules per side, so e.g. version 2 produces a 25 * 25 QR code. The error_correction parameter sets the error-correction level of the QR code; there are four levels: 1.ERROR_CORRECT_L: about 7% of codewords can be corrected 2.ERROR_CORRECT_M: about 15% of codewords can be corrected 3.ERROR_CORRECT_Q: about 25% of codewords can be corrected 4.ERROR_CORRECT_H: about 30% of codewords can be corrected A QR code image is generated according to these parameters. The box_size parameter sets the pixel size of each module (grid square) of the QR code. The border parameter sets the border thickness in modules (default is 4). :param data: the data to encode, e.g. a URL or a string such as "I love you, Chengyuan" :return: img the generated image; call image.save() if you need to persist it """ qr = qrcode.QRCode( version=version, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=box_size, border=border ) qr.add_data(data) # qr.add_data("I love you, Chengyuan") qr.make(fit=True) img = qr.make_image() return img def create_mid_pic_code(data, path): """ Generate a QR code with a picture embedded at its center :param data: the QR code content :param path: path to the picture to place at the center of the QR code :return: img the finished image """ qr = qrcode.QRCode( version=4, error_correction=qrcode.constants.ERROR_CORRECT_H, box_size=10, border=2 ) qr.add_data(data) qr.make(fit=True) img = qr.make_image() img = img.convert("RGBA") # open the picture file to embed picture = Image.open(path) img_w, img_h = img.size factor = 4 size_w = int(img_w / factor) size_h = int(img_h / factor) picture_w, picture_h = picture.size if picture_w > size_w: picture_w = size_w if picture_h > size_h: picture_h = size_h picture = picture.resize((picture_w, picture_h), Image.ANTIALIAS) w = int((img_w - picture_w) / 2) h = int((img_h - picture_h) / 2) img.paste(picture, (w, h), picture) return img
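# --- Hedged usage sketch (assumes the qrcode and Pillow packages are
# --- installed; the URL and file names below are hypothetical):
if __name__ == '__main__':
    # a plain QR code encoding a URL
    img = create_qr_code('https://example.com', version=7)
    img.save('plain_qr.png')
    # a QR code with a centered logo; create_mid_pic_code uses the
    # ERROR_CORRECT_H level so the code stays readable despite the overlay
    img_logo = create_mid_pic_code('https://example.com', 'logo.png')
    img_logo.save('logo_qr.png')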
null
qr_code.py
qr_code.py
py
2,376
python
en
code
null
code-starcoder2
83
[ { "api_name": "qrcode.QRCode", "line_number": 27, "usage_type": "call" }, { "api_name": "qrcode.constants", "line_number": 29, "usage_type": "attribute" }, { "api_name": "qrcode.QRCode", "line_number": 46, "usage_type": "call" }, { "api_name": "qrcode.constants", "line_number": 48, "usage_type": "attribute" }, { "api_name": "PIL.Image.open", "line_number": 59, "usage_type": "call" }, { "api_name": "PIL.Image", "line_number": 59, "usage_type": "name" }, { "api_name": "PIL.Image.ANTIALIAS", "line_number": 71, "usage_type": "attribute" }, { "api_name": "PIL.Image", "line_number": 71, "usage_type": "name" } ]
467828372
# coding=utf-8 import math import logging import getopt import sys logger = logging.getLogger() def convert_axes(qax, qay, qaz, pa, ra, yaw_control=False): """ Axes: Convert the acceleration in g's to earth coordinates, then integrate to convert to speeds in the earth's X and Y axes in meters per second. Matrix 1: Uses X, Y, and Z accelerometers but omits yaw --------- |eax| | cos(pitch), 0, -sin(pitch)| |qax| |eay| = | 0, cos(roll), -sin(roll)| |qay| |eaz| | sin(pitch), sin(roll), cos(pitch).cos(roll)| |qaz| Matrix 2: Uses X, Y, and Z accelerometers and includes yaw (unsupported) --------- |eax| | cos(pitch), sin(roll), -sin(pitch)| |qax| |eay| = | sin(pitch), cos(roll), -sin(roll)| |qay| |eaz| | sin(pitch), sin(roll), cos(pitch).cos(roll)| |qaz| @param qax: X-axis acceleration in g's (quad frame) @param qay: Y-axis acceleration in g's (quad frame) @param qaz: Z-axis acceleration in g's (quad frame) @param pa: pitch angle in radians @param ra: roll angle in radians @param yaw_control: Boolean - selects Matrix 2 (unsupported) when True @return: (eax, eay, eaz) earth-frame accelerations in g's """ if not yaw_control: eax = qax * math.cos(pa) - qaz * math.sin(pa) eay = qay * math.cos(ra) - qaz * math.sin(ra) eaz = qaz * math.cos(pa) * math.cos(ra) + qax * math.sin(pa) + qay * math.sin(ra) - 1.0 else: eax = qax * math.cos(pa) + qay * math.sin(ra) - qaz * math.sin(pa) eay = qax * math.sin(pa) + qay * math.cos(ra) - qaz * math.sin(ra) eaz = qaz * math.cos(pa) * math.cos(ra) + qax * math.sin(pa) + qay * math.sin(ra) - 1.0 return eax, eay, eaz def check_cli(argv): """ Check CLI validity, set calibrate_sensors / fly or sys.exit(2) @param argv: @return: """ cli_fly = False cli_calibrate_sensors = False cli_video = False cli_hover_target = 680 #----------------------------------------------------------------------------------- # Defaults for vertical velocity PIDs #----------------------------------------------------------------------------------- cli_vvp_gain = 300.0 cli_vvi_gain = 150.0 cli_vvd_gain = 0.0 #----------------------------------------------------------------------------------- # Defaults for horizontal velocity PIDs #----------------------------------------------------------------------------------- cli_hvp_gain = 0.6 cli_hvi_gain = 0.1 cli_hvd_gain = 0.0 #----------------------------------------------------------------------------------- # Defaults for absolute angle PIDs #----------------------------------------------------------------------------------- cli_aap_gain = 2.5 cli_aai_gain = 0.0 cli_aad_gain = 0.0 #----------------------------------------------------------------------------------- # Defaults for rotation rate PIDs #----------------------------------------------------------------------------------- cli_rrp_gain = 150 cli_rri_gain = 0.0 cli_rrd_gain = 0.0 #----------------------------------------------------------------------------------- # Other configuration defaults #----------------------------------------------------------------------------------- cli_test_case = 0 cli_tau = 2.0 # 0.25 * 100 = 25 samples averaged for -3dB merge cli_dlpf = 5 cli_loop_frequency = 500 # 100 cli_matrix = 2 cli_statistics = False cli_yaw_control = False cli_motion_frequency = 40 cli_attitude_frequency = 40 hover_target_defaulted = True no_drift_control = False rrp_set = False rri_set = False rrd_set = False aap_set = False aai_set = False aad_set = False #----------------------------------------------------------------------------------- # Right, let's get on with reading the command line and checking consistency #----------------------------------------------------------------------------------- try: opts, args = getopt.getopt(argv,'a:fcvh:l:m:nsy', ['tc=', 'vvp=', 'vvi=', 'vvd=', 'hvp=', 'hvi=', 'hvd=', 'aap=', 'aai=', 'aad=', 'arp=',
'ari=', 'ard=', 'tau=', 'dlpf=']) except getopt.GetoptError: logger.critical('qcpi.py [-f][-h hover_target][-v]') sys.exit(2) for opt, arg in opts: if opt == '-f': cli_fly = True elif opt in '-h': cli_hover_target = int(arg) hover_target_defaulted = False elif opt in '-v': cli_video = True elif opt in '-a': cli_attitude_frequency = int(arg) elif opt in '-c': cli_calibrate_sensors = True elif opt in '-l': cli_loop_frequency = int(arg) elif opt in '-m': cli_motion_frequency = int(arg) elif opt in '-n': no_drift_control = True elif opt in '-s': cli_statistics = True elif opt in '-y': cli_yaw_control = True elif opt in '--vvp': cli_vvp_gain = float(arg) elif opt in '--vvi': cli_vvi_gain = float(arg) elif opt in '--vvd': cli_vvd_gain = float(arg) elif opt in '--hvp': cli_hvp_gain = float(arg) elif opt in '--hvi': cli_hvi_gain = float(arg) elif opt in '--hvd': cli_hvd_gain = float(arg) elif opt in '--aap': cli_aap_gain = float(arg) aap_set = True elif opt in '--aai': cli_aai_gain = float(arg) aai_set = True elif opt in '--aad': cli_aad_gain = float(arg) aad_set = True elif opt in '--arp': cli_rrp_gain = float(arg) rrp_set = True elif opt in '--ari': cli_rri_gain = float(arg) rri_set = True elif opt in '--ard': cli_rrd_gain = float(arg) rrd_set = True elif opt in '--tc': cli_test_case = int(arg) elif opt in '--tau': cli_tau = float(arg) elif opt in '--dlpf': cli_dlpf = int(arg) if not cli_calibrate_sensors and not cli_fly and cli_test_case == 0: logger.critical('Must specify one of -f or -c or --tc') logger.critical(' qcpi.py [-f] [-h hover_target] [-c] [-v]') logger.critical(' -f set whether to fly') logger.critical(' -h set the hover speed for manual testing') logger.critical(' -c calibrate sensors against temperature and save') logger.critical(' -s enable diagnostic statistics') logger.critical(' -n disable drift control') logger.critical(' -y enable yaw control (unsupported)') logger.critical(' -v video the flight') logger.critical(' -l ?? set the processing loop frequency') logger.critical(' -a ?? set attitude PID update frequency') logger.critical(' -m ?? set motion PID update frequency') logger.critical(' --vvp set vertical speed PID P gain') logger.critical(' --vvi set vertical speed PID I gain') logger.critical(' --vvd set vertical speed PID D gain') logger.critical(' --hvp set horizontal speed PID P gain') logger.critical(' --hvi set horizontal speed PID I gain') logger.critical(' --hvd set horizontal speed PID D gain') logger.critical(' --aap set absolute angle PID P gain') logger.critical(' --aai set absolute angle PID I gain') logger.critical(' --aad set absolute angle PID D gain') logger.critical(' --arp set angular PID P gain') logger.critical(' --ari set angular PID I gain') logger.critical(' --ard set angular PID D gain') logger.critical(' --tc select which testcase to run') logger.critical(' --tau set the complementary filter period') logger.critical(' --dlpf set the digital low pass filter') sys.exit(2) elif not cli_calibrate_sensors and (cli_hover_target < 0 or cli_hover_target > 1000): logger.critical('Hover speed must lie in the following range') logger.critical('0 <= test speed <= 1000') sys.exit(2) elif cli_yaw_control: logger.critical('YAW control is not supported yet') sys.exit(2) elif cli_test_case == 0 and cli_fly: logger.critical('Pre-flight checks passed, enjoy your flight, sir!') if no_drift_control: cli_hvp_gain = 0.0 cli_hvi_gain = 0.0 cli_hvd_gain = 0.0 cli_aap_gain = 1.5 cli_aai_gain = 0.5 cli_aad_gain = 0.01 cli_rrp_gain = 110 cli_rri_gain = 100 cli_rrd_gain = 2.5 elif cli_test_case == 0 and cli_calibrate_sensors: logger.critical('Calibrate sensors is it, sir!') elif cli_test_case == 0: logger.critical('You must specify flight (-f) or gravity calibration (-c)') sys.exit(2) elif cli_fly or cli_calibrate_sensors: logger.critical('Choose a specific test case (--tc) or fly (-f) or calibrate gravity (-c)') sys.exit(2) #--------------------------------------------------------------------------------------- # Test case 1: Check all the blades work and spin in the right direction # Test case 2: Tune the rotational rate PIDs # Test case 3: Tune the absolute angle PIDs # Test case 4: Tune the hover speed #--------------------------------------------------------------------------------------- elif cli_test_case < 1 or cli_test_case > 4: logger.critical('Select test case 1, 2, 3 or 4') sys.exit(2) elif hover_target_defaulted: logger.critical('You must choose a specific hover speed (-h) for all test cases.') sys.exit(2) elif cli_test_case == 2 and (not rrp_set or not rri_set or not rrd_set): logger.critical('You must choose a starting point for the angular rate PID P, I and D gains') logger.critical('Try sudo python ./qc.py --tc 2 -h 450 --arp 50 --ari 0.0 --ard 0.0 and work up from there') sys.exit(2) elif cli_test_case == 3 and (not aap_set or not aai_set or not aad_set): logger.critical('You must choose a starting point for the absolute angle PID P, I and D gains') logger.critical('Try sudo python ./qc.py --tc 3 -h 450 --aap 1.5 --aai 0.5 --aad 0.001 and work up from there') sys.exit(2) elif cli_test_case == 2: cli_vvp_gain = 0.0 cli_vvi_gain = 0.0 cli_vvd_gain = 0.0 cli_hvp_gain = 0.0 cli_hvi_gain = 0.0 cli_hvd_gain = 0.0 cli_aap_gain = 0.0 cli_aai_gain = 0.0 cli_aad_gain = 0.0 elif cli_test_case == 3 or cli_test_case == 4: cli_vvp_gain = 0.0 cli_vvi_gain = 0.0 cli_vvd_gain = 0.0 cli_hvp_gain = 0.0 cli_hvi_gain = 0.0 cli_hvd_gain = 0.0 return cli_calibrate_sensors, cli_fly, cli_hover_target, cli_video, cli_vvp_gain, cli_vvi_gain, cli_vvd_gain, cli_hvp_gain, cli_hvi_gain, cli_hvd_gain, cli_aap_gain, cli_aai_gain,
cli_aad_gain, cli_rrp_gain, cli_rri_gain, cli_rrd_gain, cli_test_case, cli_tau, cli_dlpf, cli_loop_frequency, cli_motion_frequency, cli_attitude_frequency, cli_statistics, cli_yaw_control
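# --- Hedged worked example of convert_axes (values are hypothetical): a
# --- quad pitched 0.1 rad with level roll and only gravity on its Z axis.
if __name__ == '__main__':
    eax, eay, eaz = convert_axes(0.0, 0.0, 1.0, pa=0.1, ra=0.0)
    # Matrix 1 gives eax = -sin(0.1) ~ -0.0998, eay = 0.0,
    # and eaz = cos(0.1) - 1 ~ -0.005 (the 1 g offset is subtracted)
    print(eax, eay, eaz)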
null
py/car/nodes/components/accel/qc/utils.py
utils.py
py
11,237
python
en
code
null
code-starcoder2
83
[ { "api_name": "logging.getLogger", "line_number": 7, "usage_type": "call" }, { "api_name": "math.cos", "line_number": 36, "usage_type": "call" }, { "api_name": "math.sin", "line_number": 36, "usage_type": "call" }, { "api_name": "math.cos", "line_number": 37, "usage_type": "call" }, { "api_name": "math.sin", "line_number": 37, "usage_type": "call" }, { "api_name": "math.cos", "line_number": 38, "usage_type": "call" }, { "api_name": "math.sin", "line_number": 38, "usage_type": "call" }, { "api_name": "math.cos", "line_number": 41, "usage_type": "call" }, { "api_name": "math.sin", "line_number": 41, "usage_type": "call" }, { "api_name": "math.cos", "line_number": 42, "usage_type": "call" }, { "api_name": "math.sin", "line_number": 42, "usage_type": "call" }, { "api_name": "math.cos", "line_number": 43, "usage_type": "call" }, { "api_name": "math.sin", "line_number": 43, "usage_type": "call" }, { "api_name": "getopt.getopt", "line_number": 115, "usage_type": "call" }, { "api_name": "getopt.GetoptError", "line_number": 116, "usage_type": "attribute" }, { "api_name": "sys.exit", "line_number": 118, "usage_type": "call" }, { "api_name": "sys.exit", "line_number": 231, "usage_type": "call" }, { "api_name": "sys.exit", "line_number": 236, "usage_type": "call" }, { "api_name": "sys.exit", "line_number": 240, "usage_type": "call" }, { "api_name": "sys.exit", "line_number": 260, "usage_type": "call" }, { "api_name": "sys.exit", "line_number": 264, "usage_type": "call" }, { "api_name": "sys.exit", "line_number": 275, "usage_type": "call" }, { "api_name": "sys.exit", "line_number": 279, "usage_type": "call" }, { "api_name": "sys.exit", "line_number": 284, "usage_type": "call" }, { "api_name": "sys.exit", "line_number": 289, "usage_type": "call" } ]
447965413
import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable import numpy as np import json from tqdm import tqdm, trange from utils import TensorboardWriter from network import AE def save_checkpoint(epoch, model, optimizer, path): state = { 'Epoch': epoch, 'State_dict': model.state_dict(), 'optimizer': optimizer.state_dict() } torch.save(state, path) def load_checkpoint(model, optimizer, path): checkpoint = torch.load(path) model.load_state_dict(checkpoint['State_dict']) optimizer.load_state_dict(checkpoint['optimizer']) class Solver(object): def __init__(self, config=None, train_loader=None, test_loader=None): """Class that Builds, Trains and Evaluates SUM-GAN model""" self.config = config self.train_loader = train_loader self.test_loader = test_loader def build(self): self.linear_compress = nn.Linear( self.config.input_size, self.config.hidden_size).cuda() self.AE = AE(input_size=self.config.hidden_size, hidden_size=self.config.hidden_size, num_layers=self.config.num_layers).cuda() self.model = nn.ModuleList([ self.linear_compress, self.AE]) # Build Modules if self.config.mode == 'train': # Build Optimizers self.ae_optimizer = optim.Adam( list(self.AE.e_lstm.parameters()) + list(self.AE.d_lstm.parameters()) + list(self.linear_compress.parameters()), lr=self.config.lr) # load a saved model and resume training if self.config.pre_trained: load_checkpoint(self.model, self.ae_optimizer, self.config.model_dir) self.AE.train() self.writer = TensorboardWriter(self.config.log_dir) def train(self): # when resuming training, continue counting epochs and steps from the checkpoint if self.config.pre_trained: md = self.config.model_dir n_epochs = int(md[md.find('epoch-') + 6:md.find('pkl') - 1]) epochs = tqdm(range(n_epochs, n_epochs + self.config.n_epochs), desc='Epoch', ncols=80) video_type = ['SumMe', 'TvSum', 'OVP', 'All'] train_example_num = [20, 40, 80, 175] step = n_epochs * train_example_num[video_type.index(self.config.video_type)] else: step = 0 n_epochs = self.config.n_epochs epochs = tqdm(range(n_epochs), desc='Epoch', ncols=80) print(n_epochs) mse_loss = nn.MSELoss() # tqdm setup for epoch_i in epochs: loss_history = [] if self.config.verbose: tqdm.write('\nTraining') for batch_i, image_features in enumerate(tqdm( self.train_loader, desc='Batch', ncols=80, leave=False)): # code I modified / skipping by image count, no limit for now tqdm.write(f'\n------{batch_i}th Batch: {image_features.size(1)} size') image_features = image_features.view(-1, 1024) image_features_ = Variable(image_features).cuda() # ---- Train sLSTM, eLSTM ----# original_features = self.linear_compress(image_features_.detach()).unsqueeze(1) decoded_features = self.AE(original_features) loss = mse_loss(original_features, decoded_features) tqdm.write(f'loss: {loss}') self.ae_optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(self.AE.parameters(), self.config.clip) self.ae_optimizer.step() loss_history.append(loss.data) self.writer.update_loss(loss.data, step, 'reconstruct_loss') step += 1 epoch_loss = torch.stack(loss_history).mean() # Plot if self.config.verbose: tqdm.write('Plotting...') self.writer.update_loss(epoch_loss, epoch_i, 'loss_epochs') # Save parameters at checkpoint every ten epochs if (epoch_i + 1) % 10 == 0: ckpt_path = str(self.config.save_dir) + f'_epoch-{epoch_i + 1}.pkl' tqdm.write(f'Save parameters at {ckpt_path}') save_checkpoint(epoch_i+1, self.model, self.ae_optimizer, ckpt_path) def evaluate(self, model_path):
self.AE.load_state_dict(torch.load(model_path)) self.AE.eval() mse_loss = nn.MSELoss() out_dict = {} print(self.test_loader) losses = [] for video_tensor, video_name in tqdm(self.test_loader, desc='Evaluate', ncols=80, leave=False): # [seq_len, batch=1, 2048] video_tensor = video_tensor.view(-1, self.config.input_size) video_feature = Variable(video_tensor, volatile=True).cuda() # [seq_len, 1, hidden_size] original_feature = self.linear_compress(video_feature.detach()).unsqueeze(1) decoded_feature = self.AE(original_feature) loss = mse_loss(original_feature, decoded_feature) losses.append(loss) mean_loss = torch.stack(losses).mean() tqdm.write(f'loss = {mean_loss.data}.')
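# --- Hedged usage sketch: how this Solver is presumably driven (the
# --- config object and data loaders below are hypothetical; the real
# --- ones come from the project's configuration and dataset code):
# solver = Solver(config=config, train_loader=train_loader,
#                 test_loader=test_loader)
# solver.build()
# solver.train()                        # writes checkpoints every ten epochs
# solver.evaluate('ckpt_epoch-10.pkl')  # hypothetical checkpoint name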
null
original/autoencoder/solver.py
solver.py
py
5,434
python
en
code
null
code-starcoder2
83
[ { "api_name": "torch.save", "line_number": 18, "usage_type": "call" }, { "api_name": "torch.load", "line_number": 22, "usage_type": "call" }, { "api_name": "torch.nn.Linear", "line_number": 35, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 35, "usage_type": "name" }, { "api_name": "network.AE", "line_number": 39, "usage_type": "call" }, { "api_name": "torch.nn.ModuleList", "line_number": 42, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 42, "usage_type": "name" }, { "api_name": "torch.optim.Adam", "line_number": 47, "usage_type": "call" }, { "api_name": "torch.optim", "line_number": 47, "usage_type": "name" }, { "api_name": "utils.TensorboardWriter", "line_number": 58, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 65, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 72, "usage_type": "call" }, { "api_name": "torch.nn.MSELoss", "line_number": 76, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 76, "usage_type": "name" }, { "api_name": "tqdm.tqdm.write", "line_number": 82, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 82, "usage_type": "name" }, { "api_name": "tqdm.tqdm", "line_number": 83, "usage_type": "call" }, { "api_name": "tqdm.tqdm.write", "line_number": 86, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 86, "usage_type": "name" }, { "api_name": "torch.autograd.Variable", "line_number": 89, "usage_type": "call" }, { "api_name": "tqdm.tqdm.write", "line_number": 97, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 97, "usage_type": "name" }, { "api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 101, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 101, "usage_type": "attribute" }, { "api_name": "torch.stack", "line_number": 108, "usage_type": "call" }, { "api_name": "tqdm.tqdm.write", "line_number": 112, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 112, "usage_type": "name" }, { "api_name": "tqdm.tqdm.write", "line_number": 118, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 118, "usage_type": "name" }, { "api_name": "torch.load", "line_number": 123, "usage_type": "call" }, { "api_name": "torch.nn.MSELoss", "line_number": 126, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 126, "usage_type": "name" }, { "api_name": "tqdm.tqdm", "line_number": 130, "usage_type": "call" }, { "api_name": "torch.autograd.Variable", "line_number": 134, "usage_type": "call" }, { "api_name": "numpy.mean", "line_number": 141, "usage_type": "call" }, { "api_name": "tqdm.tqdm.write", "line_number": 142, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 142, "usage_type": "name" } ]
289919892
#!/home/daniel/anaconda3/bin/python # -*- coding: utf-8 -*- """ ================================================ main_process_trt_data.py ================================================ This program processes TRT data, producing plots of time series of parameters along the TRT cell trajectory """ # Author: fvj # License: BSD 3 clause import datetime import argparse import atexit import glob import os from shutil import copy from warnings import warn import numpy as np from pyrad.io import read_trt_traj_data, write_trt_cell_scores from pyrad.io import write_trt_cell_lightning from pyrad.util import belongs_roi_indices from pyrad.graph import plot_timeseries, plot_scatter_comp, plot_pos print(__doc__) def main(): """ Process the TRT cell trajectory files for the given days and plot time series of their parameters """ # parse the arguments parser = argparse.ArgumentParser( description='Entry to Pyrad processing framework') # positional arguments parser.add_argument( 'days', nargs='+', type=str, help='Dates to process. Format YYYY-MM-DD') # keyword arguments parser.add_argument( '--trtbase', type=str, default='/store/msrad/radar/trt/', help='name of folder containing the TRT cell data') parser.add_argument( '--lon', type=str, default='8.9000010,9.2000000,9.4999970,9.4999970,8.9000010', help=('longitude of the points defining the perimeter of the area ' + 'of interest')) parser.add_argument( '--lat', type=str, default='47.0000030,47.0000030,47.0000030,47.5999930,47.5999930', help=('latitude of the points defining the perimeter of the area ' + 'of interest')) args = parser.parse_args() print("====== TRT cell processing started: %s" % datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) atexit.register(_print_end_msg, "====== TRT cell processing finished: ") time_dir_list = args.days lons = args.lon.split(',') lats = args.lat.split(',') if np.size(lons) != np.size(lats): warn( str(np.size(lons))+' longitudes but '+str(np.size(lats)) + ' latitudes.
Their number must be equal') return lon_list = [] lat_list = [] for i, lon in enumerate(lons): lon_list.append(float(lon)) lat_list.append(float(lats[i])) roi = { 'lon': lon_list, 'lat': lat_list } # List for collection of max data cell_ID_max_list = [] nflashes_max_list = [] area_flash_max_list = [] flash_density_max_list = [] time_flash_density_max_list = [] flash_density_max_rank_list = [] rank_max_list = [] time_rank_max_list = [] # List for collection of flashes data cell_ID_list = np.ma.asarray([], dtype=int) time_list = np.ma.asarray([], dtype=datetime.datetime) lon_list = np.ma.asarray([], dtype=float) lat_list = np.ma.asarray([], dtype=float) flash_density_list = np.ma.asarray([], dtype=float) rank_flash_density_list = np.ma.asarray([], dtype=float) area_list = np.ma.asarray([], dtype=float) nflash_list = np.ma.asarray([], dtype=int) for i, time_dir in enumerate(time_dir_list): data_input_path = args.trtbase+time_dir+'/TRTC_cell/' data_output_base = args.trtbase+time_dir+'/TRTC_cell_plots/' flist = glob.glob(data_input_path+'*.trt') for fname in flist: print('Reading TRT trajectory file '+fname) (traj_ID, yyyymmddHHMM, lon, lat, _, _, _, area, vel_x, vel_y, det, RANKr, CG_n, CG_p, CG, _, ET45, ET45m, ET15, ET15m, VIL, maxH, maxHm, POH, _, _, _, _) = read_trt_traj_data(fname) inds, is_roi = belongs_roi_indices(lat, lon, roi) if is_roi == 'None': continue elif is_roi == 'Some' and len(lat[inds]) < 3: continue data_output_path = data_output_base+is_roi+'/' if not os.path.isdir(data_output_path): os.makedirs(data_output_path) # copy file copy(fname, data_output_path) # general characteristics flash_density = CG/area cell_ID_max_list.append(traj_ID[0]) flash_density_max_list.append(np.max(flash_density)) nflashes_max_list.append(CG[np.argmax(flash_density)]) area_flash_max_list.append(area[np.argmax(flash_density)]) time_flash_density_max_list.append( yyyymmddHHMM[np.argmax(flash_density)]) flash_density_max_rank_list.append( RANKr[np.argmax(flash_density)]) rank_max_list.append(np.max(RANKr)) time_rank_max_list.append(yyyymmddHHMM[np.argmax(RANKr)]) cell_ID_list = np.append(cell_ID_list, traj_ID) time_list = np.append(time_list, yyyymmddHHMM) lon_list = np.append(lon_list, lon) lat_list = np.append(lat_list, lat) flash_density_list = np.append(flash_density_list, flash_density) rank_flash_density_list = np.append( rank_flash_density_list, RANKr) area_list = np.append(area_list, area) nflash_list = np.append(nflash_list, CG) # Time series plots figfname = data_output_path+str(traj_ID[0])+'_flash_density.png' plot_timeseries( yyyymmddHHMM, [flash_density], [figfname], labelx='Time UTC', labely='Flash density [flashes/km2]', title=str(traj_ID[0])+' flash density') figfname = data_output_path+str(traj_ID[0])+'_area.png' plot_timeseries( yyyymmddHHMM, [area], [figfname], labelx='Time UTC', labely='area [km2]', title=str(traj_ID[0])+' cell area') figfname = data_output_path+str(traj_ID[0])+'_vel.png' plot_timeseries( yyyymmddHHMM, [vel_x, vel_y], [figfname], labelx='Time UTC', labely='Velocity [km/h]', labels=['x speed', 'y speed'], title=str(traj_ID[0])+' cell velocity') figfname = data_output_path+str(traj_ID[0])+'_det.png' plot_timeseries( yyyymmddHHMM, [det], [figfname], labelx='Time UTC', labely='Detection threshold [dBZ]', title=str(traj_ID[0])+' cell detection threshold') figfname = data_output_path+str(traj_ID[0])+'_rank.png' plot_timeseries( yyyymmddHHMM, [RANKr], [figfname], labelx='Time UTC', labely='Rank [-]', title=str(traj_ID[0])+' cell rank') figfname =
data_output_path+str(traj_ID[0])+'_lightning.png' plot_timeseries( yyyymmddHHMM, [CG_n, CG_p, CG], [figfname], labelx='Time UTC', labely='N flash [-]', labels=['CG-', 'CG+', 'CG'], title=str(traj_ID[0])+' flashes in cell') figfname = data_output_path+str(traj_ID[0])+'_ET.png' plot_timeseries( yyyymmddHHMM, [ET45, ET45m, ET15, ET15m], [figfname], labelx='Time UTC', labely='Echo Top [km]', labels=['ET45', 'ET45m', 'ET15', 'ET15m'], title=str(traj_ID[0])+' Echo top') figfname = data_output_path+str(traj_ID[0])+'_VIL.png' plot_timeseries( yyyymmddHHMM, [VIL], [figfname], labelx='Time UTC', labely='VIL [Kg/m2]', labels=['VIL'], title=str(traj_ID[0])+' VIL') figfname = data_output_path+str(traj_ID[0])+'_maxH.png' plot_timeseries( yyyymmddHHMM, [maxH, maxHm], [figfname], labelx='Time UTC', labely='Max. Echo Height [Km]', labels=['maxH', 'maxHm'], title=str(traj_ID[0])+' Height of Max. Reflectivity') figfname = data_output_path+str(traj_ID[0])+'_POH.png' plot_timeseries( yyyymmddHHMM, [POH], [figfname], labelx='Time UTC', labely='POH [%]', labels=['POH'], title=str(traj_ID[0])+' Probability of Hail') # plot position # get time since start of cell in s td_vec = yyyymmddHHMM-yyyymmddHHMM[0] tt_s = np.empty(td_vec.size, dtype=float) for j, td in enumerate(td_vec): tt_s[j] = td.total_seconds() cb_label = ( 'Time since '+yyyymmddHHMM[0].strftime('%Y-%m-%d %H:%M') + ' [s]') figfname = data_output_path+str(traj_ID[0])+'_pos.png' figfname = plot_pos( lat, lon, tt_s, [figfname], cb_label=cb_label, titl=str(traj_ID[0])+' Cell Position') print('Plotted '+' '.join(figfname)) fname = args.trtbase+'Santis_cell_scores.csv' write_trt_cell_scores( cell_ID_max_list, time_flash_density_max_list, flash_density_max_rank_list, nflashes_max_list, area_flash_max_list, flash_density_max_list, time_rank_max_list, rank_max_list, fname) fname = args.trtbase+'Santis_cell_euclid_lightning.csv' write_trt_cell_lightning( cell_ID_list, time_list, lon_list, lat_list, area_list, rank_flash_density_list, nflash_list, flash_density_list, fname) plot_scatter_comp( flash_density_list, rank_flash_density_list/10., [args.trtbase+'hist_flash_density_rank'], labelx='flash density [flashes/km2]', labely='rank', titl='Flash density vs Rank', axis=None, metadata=None, dpi=72) def _print_end_msg(text): """ prints end message Parameters ---------- text : str the text to be printed Returns ------- Nothing """ print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) # --------------------------------------------------------- # Start main: # --------------------------------------------------------- if __name__ == "__main__": main()
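# --- Hedged usage sketch: a typical invocation (dates and the triangular
# --- ROI below are hypothetical; --lon and --lat must list the same
# --- number of points):
#   python main_process_trt_data.py 2017-06-29 2017-06-30 \
#       --trtbase /store/msrad/radar/trt/ \
#       --lon 8.9,9.2,9.5 --lat 47.0,47.0,47.6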
null
src/pyrad_proc/pyrad/EGG-INFO/scripts/main_process_trt_data.py
main_process_trt_data.py
py
10,006
python
en
code
null
code-starcoder2
83
[ { "api_name": "argparse.ArgumentParser", "line_number": 39, "usage_type": "call" }, { "api_name": "datetime.datetime.utcnow", "line_number": 68, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 68, "usage_type": "attribute" }, { "api_name": "atexit.register", "line_number": 69, "usage_type": "call" }, { "api_name": "numpy.size", "line_number": 76, "usage_type": "call" }, { "api_name": "warnings.warn", "line_number": 77, "usage_type": "call" }, { "api_name": "numpy.size", "line_number": 78, "usage_type": "call" }, { "api_name": "numpy.ma.asarray", "line_number": 104, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 104, "usage_type": "attribute" }, { "api_name": "numpy.ma.asarray", "line_number": 105, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 105, "usage_type": "attribute" }, { "api_name": "datetime.datetime", "line_number": 105, "usage_type": "attribute" }, { "api_name": "numpy.ma.asarray", "line_number": 106, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 106, "usage_type": "attribute" }, { "api_name": "numpy.ma.asarray", "line_number": 107, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 107, "usage_type": "attribute" }, { "api_name": "numpy.ma.asarray", "line_number": 108, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 108, "usage_type": "attribute" }, { "api_name": "numpy.ma.asarray", "line_number": 109, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 109, "usage_type": "attribute" }, { "api_name": "numpy.ma.asarray", "line_number": 110, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 110, "usage_type": "attribute" }, { "api_name": "numpy.ma.asarray", "line_number": 111, "usage_type": "call" }, { "api_name": "numpy.ma", "line_number": 111, "usage_type": "attribute" }, { "api_name": "glob.glob", "line_number": 117, "usage_type": "call" }, { "api_name": "pyrad.io.read_trt_traj_data", "line_number": 123, "usage_type": "call" }, { "api_name": "pyrad.util.belongs_roi_indices", "line_number": 125, "usage_type": "call" }, { "api_name": "os.path.isdir", "line_number": 133, "usage_type": "call" }, { "api_name": "os.path", "line_number": 133, "usage_type": "attribute" }, { "api_name": "os.makedirs", "line_number": 134, "usage_type": "call" }, { "api_name": "shutil.copy", "line_number": 137, "usage_type": "call" }, { "api_name": "numpy.max", "line_number": 142, "usage_type": "call" }, { "api_name": "numpy.argmax", "line_number": 143, "usage_type": "call" }, { "api_name": "numpy.argmax", "line_number": 144, "usage_type": "call" }, { "api_name": "numpy.argmax", "line_number": 146, "usage_type": "call" }, { "api_name": "numpy.argmax", "line_number": 148, "usage_type": "call" }, { "api_name": "numpy.max", "line_number": 149, "usage_type": "call" }, { "api_name": "numpy.argmax", "line_number": 150, "usage_type": "call" }, { "api_name": "numpy.append", "line_number": 152, "usage_type": "call" }, { "api_name": "numpy.append", "line_number": 153, "usage_type": "call" }, { "api_name": "numpy.append", "line_number": 154, "usage_type": "call" }, { "api_name": "numpy.append", "line_number": 155, "usage_type": "call" }, { "api_name": "numpy.append", "line_number": 156, "usage_type": "call" }, { "api_name": "numpy.append", "line_number": 157, "usage_type": "call" }, { "api_name": "numpy.append", "line_number": 159, "usage_type": "call" }, { "api_name": "numpy.append", "line_number": 160, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_timeseries", 
"line_number": 164, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_timeseries", "line_number": 170, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_timeseries", "line_number": 175, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_timeseries", "line_number": 181, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_timeseries", "line_number": 187, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_timeseries", "line_number": 192, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_timeseries", "line_number": 198, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_timeseries", "line_number": 205, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_timeseries", "line_number": 211, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_timeseries", "line_number": 217, "usage_type": "call" }, { "api_name": "numpy.empty", "line_number": 225, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_pos", "line_number": 232, "usage_type": "call" }, { "api_name": "pyrad.io.write_trt_cell_scores", "line_number": 238, "usage_type": "call" }, { "api_name": "pyrad.io.write_trt_cell_lightning", "line_number": 244, "usage_type": "call" }, { "api_name": "pyrad.graph.plot_scatter_comp", "line_number": 248, "usage_type": "call" }, { "api_name": "datetime.datetime.utcnow", "line_number": 269, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 269, "usage_type": "attribute" } ]
480854253
from django.contrib import admin from users.models import FoodPreference, UserFoodPreference class FoodPreferenceAdmin(admin.ModelAdmin): list_display = ('slug', 'name') class UserFoodPreferenceAdmin(admin.ModelAdmin): list_display = ('user_id', 'preference_id') # Register your models here. admin.site.register(FoodPreference, FoodPreferenceAdmin) admin.site.register(UserFoodPreference, UserFoodPreferenceAdmin)
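# --- Hedged sketch: the decorator form of the same registration, shown
# --- for illustration only (it is equivalent to admin.site.register
# --- above and must not be used together with it):
# @admin.register(FoodPreference)
# class FoodPreferenceAdmin(admin.ModelAdmin):
#     list_display = ('slug', 'name')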
null
users/admin.py
admin.py
py
428
python
en
code
null
code-starcoder2
83
[ { "api_name": "django.contrib.admin.ModelAdmin", "line_number": 5, "usage_type": "attribute" }, { "api_name": "django.contrib.admin", "line_number": 5, "usage_type": "name" }, { "api_name": "django.contrib.admin.ModelAdmin", "line_number": 9, "usage_type": "attribute" }, { "api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name" }, { "api_name": "django.contrib.admin.site.register", "line_number": 14, "usage_type": "call" }, { "api_name": "users.models.FoodPreference", "line_number": 14, "usage_type": "argument" }, { "api_name": "django.contrib.admin.site", "line_number": 14, "usage_type": "attribute" }, { "api_name": "django.contrib.admin", "line_number": 14, "usage_type": "name" }, { "api_name": "django.contrib.admin.site.register", "line_number": 15, "usage_type": "call" }, { "api_name": "users.models.UserFoodPreference", "line_number": 15, "usage_type": "argument" }, { "api_name": "django.contrib.admin.site", "line_number": 15, "usage_type": "attribute" }, { "api_name": "django.contrib.admin", "line_number": 15, "usage_type": "name" } ]