| seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (1 class) | lang (38 classes) | doc_type (1 class) | stars (int64, 0–24.2k, nullable) | dataset (1 class) | pt (1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
13563263971 |
import requests
import asyncio
import json
from hoshino import aiorequests
apiroot = 'https://help.tencentbot.top'
async def getprofile(viewer_id: int, interval: int = 1, full: bool = False) -> dict:
reqid = json.loads(await aiorequests.get(f'{apiroot}/enqueue?full={full}&target_viewer_id={viewer_id}').content.decode('utf8'))['reqeust_id']
if reqid is None:
return "id err"
while True:
query = json.loads(await aiorequests.get(f'{apiroot}/query?request_id={reqid}').content.decode('utf8'))
status = query['status']
if status == 'done':
return query['data']
elif status == 'queue':
await asyncio.sleep(interval)  # don't block the event loop while polling
else: # notfound or else
return "queue"
async def queryarena(defs: list, page: int) -> dict:
return json.loads(await aiorequests.get(f'{apiroot}/arena?def={",".join([str(x) for x in defs])}&page={page}').content.decode('utf8'))
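# A minimal, hypothetical usage sketch (not part of the original file): it assumes an
# asyncio event loop and uses placeholder viewer/unit ids to show how the two coroutines
# above would be awaited.
#
# import asyncio
#
# async def _demo():
#     profile = await getprofile(123456789, interval=2)      # placeholder viewer id
#     page = await queryarena([101001, 102001, 103001], 1)   # placeholder unit ids
#     print(profile, page)
#
# asyncio.run(_demo())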
| pcrbot/arena_query_push | queryapi.py | queryapi.py | py | 933 | python | en | code | 7 | github-code | 6 |
69894678589 |
import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col, monotonically_increasing_id
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format
from pyspark.sql import types as t
# reading in the AWS config information from the dl.cfg file
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID'] =config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] =config['AWS']['AWS_SECRET_ACCESS_KEY']
print(os.environ['AWS_ACCESS_KEY_ID'] )
print(os.environ['AWS_SECRET_ACCESS_KEY'] )
#def create_spark_session():
# spark = SparkSession \
# .builder \
# .config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
# .getOrCreate()
# print("spark session created")
# return spark
def create_spark_session():
"""
This creates a Spark session, specifying the hadoop-aws package and the S3A filesystem, and passes the AWS ID
and key in from environment variables
Parameters:
None
Returns:
Spark session object
"""
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.config("spark.hadoop.fs.s3a.impl","org.apache.hadoop.fs.s3a.S3AFileSystem") \
.config("spark.hadoop.fs.s3a.awsAccessKeyId", os.environ['AWS_ACCESS_KEY_ID']) \
.config("spark.hadoop.fs.s3a.awsSecretAccessKey", os.environ['AWS_SECRET_ACCESS_KEY']) \
.getOrCreate()
print("spark session created")
return spark
def process_song_data(spark, input_data, output_data):
"""
This reads in song and artist data as JSON files from the udacity s3 bucket into a spark dataframe
and then uses spark sql to write selected columns, in parquet format, back to the user-generated S3 bucket
Parameters:
spark: A spark session object
input_data: A string representing the udacity-generated s3 bucket root
output_data: A string representing the user-generated s3 bucket root
Output:
No output returned: but two parquet files written to user-generated s3 bucket
"""
# get filepath to song data file
song_data = input_data +"song_data/*/*/*/*.json"
print(song_data)
# read song data file
print("reading in song data")
df = spark.read.json(song_data)
# extract columns to create songs table
df.createOrReplaceTempView("songs_table_df")
songs_table = spark.sql("""
SELECT song_id, title, artist_id,year, duration
FROM songs_table_df
ORDER by song_id
""")
# write songs table to parquet files partitioned by year and artist
songs_table_path = output_data + "songs_table.parquet"
print("read to songs table to parquet format")
songs_table.write.mode("overwrite").partitionBy("year","artist_id").parquet(songs_table_path)
# extract columns to create artists table
df.createOrReplaceTempView("artist_table_df")
artists_table = spark.sql( """
SELECT artist_id AS artist_id,
artist_name AS name,
artist_location AS location,
artist_latitude AS latitude,
artist_longitude AS longitude
FROM artist_table_df
""")
# write artists table to parquet files
artists_table_path = output_data + "artists_table.parquet"
print("write to artist table")
artists_table.write.mode("overwrite").parquet(artists_table_path)
def process_log_data(spark, input_data, output_data):
"""
This reads in log_data from the udacity-generated s3 bucket, where the data relates to songs played, and this is written to a parquet file.
It then takes a subset of the log_data, creates time- and date- stamps by using a udf with lambdas, and this is written to a parquet file.
Song data is then read into a data frame and joined with log data to create a joined table, which is written to a parquet file.
Parameters:
spark: A spark session object
input_data: A string representing the udacity-generated s3 bucket root
output_data: A string representing the user-generated s3 bucket root
Returns:
users_table: A spark dataframe holding user information
time_table: A spark dataframe holding time information
songplays_table: A spark dataframe holding songplay information
"""
# get filepath to log data file
log_data = input_data + "log_data/*.json"
print("reading in log data")
# read log data file
df_log = spark.read.json(log_data)
# filter by actions for song plays
df_log = df_log.filter(df_log.page == 'NextSong')
# extract columns for users table
df_log.createOrReplaceTempView("users_table_df")
users_table = spark.sql("""
SELECT DISTINCT userId AS userid,
firstName AS first_name,
lastName AS last_name,
gender,
level
FROM users_table_df
ORDER BY last_name
""")
print("writing to parquet format")
# write users table to parquet files
users_table_path = output_data + "users_table.parquet"
users_table.write.mode("overwrite").parquet(users_table_path)
# create timestamp column from original timestamp column
get_timestamp = udf(lambda x: datetime.fromtimestamp(x / 1000.0), t.TimestampType())
df_log = df_log.withColumn("timestamp", get_timestamp("ts"))
# create datetime column from original timestamp column
get_datetime = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%Y-%m-%d %H:%M:%S'))
df_log = df_log.withColumn("datetime",get_datetime("ts"))
# extract columns to create time table
df_log.createOrReplaceTempView("time_table_df")
time_table = spark.sql("""SELECT DISTINCT
datetime as start_time,
hour(timestamp) as hour,
day(timestamp) as day,
weekofyear(timestamp) as week,
month(timestamp) as month,
year(timestamp) as year,
dayofweek(timestamp) as weekday
FROM time_table_df
ORDER BY start_time
""")
# write time table to parquet files partitioned by year and month
time_table_path = output_data + "time_table.parquet"
time_table.write.mode("overwrite").partitionBy("year","month").parquet(time_table_path)
# read in song data to use for songplays table
song_df = spark.read.json(input_data + "song_data/*/*/*/*.json")
#join log and song df together
df_log_song_df_joined = df_log.join(song_df, (df_log.artist == song_df.artist_name) & (df_log.song == song_df.title))
# extract columns from joined song and log datasets to create songplays table
df_log_song_df_joined.createOrReplaceTempView("songplays_table_df")
songplays_table = spark.sql("""
SELECT monotonically_increasing_id() AS songplay_id,
timestamp AS start_time,
year(timestamp) AS year,
month(timestamp) AS month,
userId AS user_id,
level AS level,
song_id AS song_id,
artist_id AS artist_id,
sessionId AS session_id,
location AS location,
userAgent AS user_agent
FROM songplays_table_df
ORDER BY user_id, session_id
""")
# write songplays table to parquet files partitioned by year and month
songplays_table_path = output_data + "songplays_table.parquet"
songplays_table.write.mode("overwrite").partitionBy("year","month").parquet(songplays_table_path)
return users_table, time_table, songplays_table
def main():
"""
Main function for the code.
It creates a spark session, defines the paths of the input and output buckets, and calls the two functions
process_song_data and process_log_data
"""
spark = create_spark_session()
input_data = "s3a://udacity-dend/"
output_data = "s3a://udacity-lake/output_data/"
process_song_data(spark, input_data, output_data)
process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
main()
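# A small, hypothetical sanity check (not part of the original file): after the ETL has run,
# the written tables can be read back from the output bucket defined in main() to confirm
# the parquet files were produced.
#
# spark = create_spark_session()
# spark.read.parquet("s3a://udacity-lake/output_data/songs_table.parquet").show(5)
# spark.read.parquet("s3a://udacity-lake/output_data/users_table.parquet").printSchema()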
| greggwilliams58/data-lake | etl.py | etl.py | py | 8,843 | python | en | code | 0 | github-code | 6 |
72137353787 |
import logging
import json
from discord import Interaction, app_commands, Role
from discord.app_commands import Choice
from discord.ext.commands import Bot, Cog
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
with open("config.json") as cfg_json:
cfg = json.loads(cfg_json.read())
owner_id = cfg["owner_id"]
async def log_reply(ctx: Interaction, response: str, ephemeral=True):
log = f"From {ctx.user}: {response}"
if ctx.guild:
log = f"From {ctx.user} in {ctx.guild.name}: {response}"
logger.info(log)
await ctx.response.send_message(response, ephemeral=ephemeral)
async def is_owner(ctx: Interaction) -> bool:
if ctx.user.id != owner_id:
await log_reply(ctx, f"**Error:** Only my owner can use this command")
return False
return True
class CommandError(Exception):
def __init__(self, msg: str):
self.msg = msg
super().__init__(msg)
@classmethod
async def send_err(cls, ctx: Interaction, msg: str):
self = cls(msg)
await log_reply(ctx, self.msg)
return self
class OwnerCog(Cog):
"""Commands that can only be used by the bot's owner"""
def __init__(self, bot: Bot):
self.bot = bot
@app_commands.command(name="sync")
@app_commands.check(is_owner)
async def sync_commands(self, ctx: Interaction):
"""(Owner only) Syncs app command info with Discord"""
await self.bot.tree.sync()
await log_reply(ctx, "Synced command tree with Discord")
emote = app_commands.Group(
name="emote",
description=("(Owner only) Modify DGG emote translations"),
)
@emote.command(name="add")
@app_commands.describe(
dgg_version="The emote as it's used in DGG",
disc_version="The emote as it's used in Discord",
)
@app_commands.check(is_owner)
async def add_emote(self, ctx: Interaction, dgg_version: str, disc_version: str):
"""(Owner only) Add or modify a DGG emote translation"""
self.bot.emotes[dgg_version] = disc_version
self.bot.save_cfg()
await log_reply(ctx, f"Translating {dgg_version} to {str(disc_version)}")
@emote.command(name="remove")
@app_commands.describe(dgg_version="The emote to remove (in DGG format)")
@app_commands.check(is_owner)
async def remove_emote(self, ctx: Interaction, dgg_version: str):
"""(Owner only) Remove a DGG emote translation"""
if dgg_version in self.bot.emotes.keys():
removed_emote = self.bot.emotes.pop(dgg_version)
self.bot.save_cfg()
await log_reply(ctx, f"Removed {removed_emote} from emotes")
else:
await log_reply(ctx, f"Couldn't find emote {dgg_version}")
config = app_commands.Group(
name="config",
description=("(Owner only) Modify the bot's config file"),
)
@config.command(name="remove")
@app_commands.choices(
mode=[
Choice(name="phrase", value="phrase"),
Choice(name="relay", value="relay"),
]
)
@app_commands.check(is_owner)
async def config_remove(self, ctx: Interaction, mode: str, value: str):
"""Remove a relay or phrase from the config file"""
if mode == "phrase" and value in self.bot.phrases:
del self.bot.phrases[value]
self.bot.save_cfg()
await log_reply(ctx, f"Removed '{value}' from phrases", ephemeral=False)
elif mode == "relay" and value in self.bot.relays:
del self.bot.relays[value]
self.bot.save_cfg()
await log_reply(ctx, f"Removed '{value}' from relays", ephemeral=False)
else:
await log_reply(ctx, f"Couldn't find '{value}' in {mode}s")
class PublicCog(Cog):
"""Commands that can be used by anybody"""
def __init__(self, bot: Bot):
self.bot = bot
async def get_relay_channel(self, ctx: Interaction) -> int:
if not ctx.guild:
err = "**Error:** This command is only usable in servers"
raise await CommandError(err).send_err(ctx, err)
if "dgg-relay-mod" not in (role.name for role in ctx.user.roles):
err = "**Error:** This command requires the 'dgg-relay-mod' role"
raise await CommandError(err).send_err(ctx, err)
relay_channel = None
for channel in ctx.guild.channels:
if channel.name == "dgg-relay":
relay_channel = channel.id
break
if not relay_channel:
err = f"**Error:** No '#dgg-relay' channel found in '{ctx.guild.name}'"
raise await CommandError(err).send_err(ctx, err)
return relay_channel
relay = app_commands.Group(
name="relay",
description="Relays DGG messages to servers",
)
@relay.command(name="add")
@app_commands.describe(dgg_username="The DGG user you want to relay messages from")
async def relay_add(self, ctx: Interaction, dgg_username: str):
"""Add a DGG user whose messages get forwarded to this server (case sensitive!)"""
relay_channel = await self.get_relay_channel(ctx)
if dgg_username not in self.bot.relays:
self.bot.relays[dgg_username] = []
logger.info(f"Added new relay list '{dgg_username}'")
if relay_channel not in self.bot.relays[dgg_username]:
self.bot.relays[dgg_username].append(relay_channel)
response = (
f"Messages from '{dgg_username}' will be relayed to '{ctx.guild.name}'"
)
else:
response = f"**Error:** '{dgg_username}' is already being relayed to '{ctx.guild.name}'"
self.bot.save_cfg()
await log_reply(ctx, response, ephemeral=False)
@relay.command(name="remove")
@app_commands.describe(dgg_username="The DGG user you want to stop relaying")
async def relay_remove(self, ctx: Interaction, dgg_username: str):
"""Remove a DGG user's relay from this server"""
relay_channel = await self.get_relay_channel(ctx)
response = None
if dgg_username in self.bot.relays.keys():
if relay_channel in self.bot.relays[dgg_username]:
self.bot.relays[dgg_username].remove(relay_channel)
response = f"Removed '{dgg_username}' relay from '{ctx.guild.name}'"
if not self.bot.relays[dgg_username]:
self.bot.relays.pop(dgg_username)
logger.info(f"Removed empty relay list for '{dgg_username}'")
self.bot.save_cfg()
if not response:
response = (
f"**Error:** '{dgg_username}' isn't being relayed to '{ctx.guild.name}'"
" (try the '/relay list' command)"
)
await log_reply(ctx, response, ephemeral=False)
@relay.command(name="list")
async def relay_list(self, ctx: Interaction):
"""Lists DGG users currently being relayed to this server."""
relay_channel = await self.get_relay_channel(ctx)
relays = []
for nickname in self.bot.relays:
for channel in self.bot.relays[nickname]:
if channel == relay_channel:
relays.append(nickname)
relays = "', '".join(relays)
response = f"This server gets messages from: '{relays}'"
if not relays:
response = "No relays are active for this server."
await log_reply(ctx, response, ephemeral=False)
live_notifications = app_commands.Group(
name="live-notifications",
description="Configure live notifications for Destiny",
)
@live_notifications.command(name="on")
async def live_notifications_on(self, ctx: Interaction):
"""Enable live notifications for this server"""
relay_channel = await self.get_relay_channel(ctx)
if relay_channel not in self.bot.live["channels"].keys():
self.bot.live["channels"][relay_channel] = {"role": None}
self.bot.live["channels"][relay_channel]["enabled"] = True
self.bot.save_cfg()
response = f"Live notifications enabled for {ctx.guild.name}"
await log_reply(ctx, response, ephemeral=False)
@live_notifications.command(name="off")
async def live_notifications_off(self, ctx: Interaction):
"""Disable live notifications for this server"""
relay_channel = await self.get_relay_channel(ctx)
if relay_channel not in self.bot.live["channels"].keys():
self.bot.live["channels"][relay_channel] = {"role": None}
self.bot.live["channels"][relay_channel]["enabled"] = False
self.bot.save_cfg()
response = f"Live notifications disabled for {ctx.guild.name}"
await log_reply(ctx, response, ephemeral=False)
@live_notifications.command(name="role")
@app_commands.describe(role="The role that will be pinged")
async def live_notifications_role(self, ctx: Interaction, role: Role):
"""Set a role that gets pinged for live notifications"""
relay_channel = await self.get_relay_channel(ctx)
if relay_channel not in self.bot.live["channels"].keys():
self.bot.live["channels"][relay_channel] = {"enabled": True}
self.bot.live["channels"][relay_channel]["role"] = role.id
self.bot.save_cfg()
response = (
f'"<@&{role.id}>" will be pinged for live notifications in {ctx.guild.name}'
)
await log_reply(ctx, response, ephemeral=False)
def check_prefs(self, disc_user):
if disc_user not in self.bot.user_prefs.keys():
self.bot.user_prefs[disc_user] = {"detect_presence": False, "ignores": []}
logger.info(f"Added new user '{disc_user}' to preferences list")
phrase = app_commands.Group(
name="phrase",
description="Relays DGG messages to users",
)
@phrase.command(name="add")
@app_commands.describe(
phrase="The phrase you want forwarded to you (most likely your DGG username)"
)
async def phrase_add(self, ctx: Interaction, phrase: str):
"""Add a phrase (usually a username) that will be forwarded
to you when it's used in DGG (case insensitive)"""
self.check_prefs(ctx.user.id)
if phrase not in self.bot.phrases:
self.bot.phrases[phrase] = []
logger.info(f"Added new phrase list for '{phrase}'")
if ctx.user.id not in self.bot.phrases[phrase]:
self.bot.phrases[phrase].append(ctx.user.id)
response = f"Forwarding '{phrase}' to {ctx.user}"
else:
response = f"**Error:** '{phrase}' is already being forwarded to {ctx.user}"
self.bot.save_cfg()
await log_reply(ctx, response)
@phrase.command(name="remove")
@app_commands.describe(phrase="The phrase you want to stop being forwarded")
async def phrase_remove(self, ctx: Interaction, phrase: str):
"""Stop a phrase from being forwarded to you"""
self.check_prefs(ctx.user.id)
response = None
if phrase in self.bot.phrases:
if ctx.user.id in self.bot.phrases[phrase]:
self.bot.phrases[phrase].remove(ctx.user.id)
response = f"No longer forwarding '{phrase}' to {ctx.user}"
if not self.bot.phrases[phrase]:
self.bot.phrases.pop(phrase)
logger.info(f"Removed empty phrase list '{phrase}'")
self.bot.save_cfg()
if not response:
response = (
f"**Error:** '{phrase}' isn't being forwarded to {ctx.user}"
" (try the '/phrase list' command)"
)
await log_reply(ctx, response)
@phrase.command(name="list")
async def phrase_list(self, ctx: Interaction):
"""List the phrases currently being forwarded to you"""
disc_user = ctx.user.id
user_phrases = []
for phrase in self.bot.phrases:
for user_id in self.bot.phrases[phrase]:
if user_id == disc_user:
user_phrases.append(phrase)
user_phrases = "', '".join(user_phrases)
response = f"Your phrases: '{user_phrases}'"
if not user_phrases:
response = "No phrases are being forwarded to you."
await log_reply(ctx, response)
@phrase.command(name="detect-dgg-presence")
@app_commands.describe(mode="Set to True to detect DGG presence. Default is False.")
async def detect_dgg_presence(self, ctx: Interaction, mode: bool):
"""Change behavior of the /phrase command by controlling when the bot messages you."""
self.check_prefs(ctx.user.id)
self.bot.user_prefs[ctx.user.id]["detect_presence"] = mode
self.bot.save_cfg()
word = "enabled" if mode else "disabled"
response = f"Presence detection {word} for {ctx.user.name}"
await log_reply(ctx, response)
ignore = app_commands.Group(
name="ignore",
description="Configure your DGG Relay ignore list",
)
@ignore.command(name="add")
@app_commands.describe(dgg_username="The user in DGG you want to ignore")
async def add_ignore(self, ctx: Interaction, dgg_username: str):
"""Ignore messages from a DGG user"""
self.check_prefs(ctx.user.id)
ignores = self.bot.user_prefs[ctx.user.id]["ignores"]
ignores.append(dgg_username)
self.bot.user_prefs[ctx.user.id]["ignores"] = list(set(ignores))
self.bot.save_cfg()
response = f"'{dgg_username}' added to your ignore list"
await log_reply(ctx, response)
@ignore.command(name="remove")
@app_commands.describe(dgg_username="The user in DGG you want to unignore")
async def remove_ignore(self, ctx: Interaction, dgg_username: str):
"""Remove someone from your ignore list"""
self.check_prefs(ctx.user.id)
ignores = self.bot.user_prefs[ctx.user.id]["ignores"]
if dgg_username not in ignores:
await log_reply(ctx, f"'{dgg_username}' is not in your ignore list")
return
self.bot.user_prefs[ctx.user.id]["ignores"].remove(dgg_username)
self.bot.save_cfg()
response = f"'{dgg_username}' removed from your ignore list"
await log_reply(ctx, response)
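# A hedged sketch (not part of the original file) of how these cogs would typically be
# attached to the bot with discord.py 2.x; the actual wiring lives elsewhere in the repo.
#
# async def setup(bot: Bot):
#     await bot.add_cog(OwnerCog(bot))
#     await bot.add_cog(PublicCog(bot))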
| tenacious210/dgg-relay | cogs.py | cogs.py | py | 14,701 | python | en | code | 2 | github-code | 6 |
3344378919 |
import logging
import sys
from loguru import logger
from starlette.config import Config
from starlette.datastructures import Secret
from app.core.logger import InterceptHandler
config = Config(".env")
API_PREFIX = "/api"
VERSION = "0.1.0"
DEBUG: bool = config("DEBUG", cast=bool, default=False)
MAX_CONNECTIONS_COUNT: int = config("MAX_CONNECTIONS_COUNT", cast=int, default=10)
MIN_CONNECTIONS_COUNT: int = config("MIN_CONNECTIONS_COUNT", cast=int, default=10)
HOST: str = config("HOST", cast=str, default="0.0.0.0")
PORT: int = config("PORT", cast=int, default=35100)
SECRET_KEY: Secret = config("SECRET_KEY", cast=Secret, default="")
PROJECT_NAME: str = config("PROJECT_NAME", default="augmentation")
# logging configuration
LOGGING_LEVEL = logging.DEBUG if DEBUG else logging.INFO
logging.basicConfig(
handlers=[InterceptHandler(level=LOGGING_LEVEL)], level=LOGGING_LEVEL
)
logger.configure(handlers=[{"sink": sys.stderr, "level": LOGGING_LEVEL}])
FASTTEXT_PATH = config("FASTTEXT_PATH", default="./model/cc.vi.300.vec")
PHOBERT_PATH = config("PHOBERT_PATH", default="./model/PhoBERT_base_fairseq")
STOPWORD_PATH = config("STOPWORD_PATH", default="./data/vietnamese-stopwords.txt")
IRRELEVANT_WORD_PATH = config("IRRELEVANT_WORD_PATH", default="./data/irrelevant_words.txt")
EDIT_DISTANCE_PATH = config("EDIT_DISTANCE_PATH", default="./data/edit_distance.txt")
MAX_CACHE_SIZE = config("MAX_CACHE_SIZE", cast=int, default=1000)
PHO_NLP_URL = config("PHO_NLP_URL", default="http://172.29.13.23:20217/")
VN_CORE_PATH = config("VN_CORE_PATH", default="http://172.29.13.23")
VN_CORE_PORT = config("VN_CORE_PORT", cast=int, default=20215)
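# A hypothetical .env illustrating the settings read above (not part of the original repo);
# the values are placeholders matching the defaults defined in this file.
#
# DEBUG=true
# PORT=35100
# SECRET_KEY=change-me
# FASTTEXT_PATH=./model/cc.vi.300.vec
# PHO_NLP_URL=http://172.29.13.23:20217/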
| hieunt2501/text-augmentation | app/core/config.py | config.py | py | 1,648 | python | en | code | 0 | github-code | 6 |
18132721237 |
from flask import Flask, redirect, render_template, request, url_for, session, flash
from datetime import timedelta
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.secret_key = "hello"
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///users.sqlite3' # Things you have to set up before creating a database
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.permanent_session_lifetime = timedelta(minutes=5) # Deciding the session time
db = SQLAlchemy(app) # creating a database
class users(db.Model):
_id = db.Column("id", db.Integer, primary_key=True)
name = db.Column(db.String(100))
email = db.Column(db.String(100))
def __init__(self, name, email):
self.name = name
self.email = email
@app.route('/')
def home():
return render_template("Home.html")
@app.route("/login", methods=["POST",'GET'])
def login():
if request.method == "POST":
session.permanent = True
user = request.form['nm']
session['user'] = user
# first look the user up by name
found_user = users.query.filter_by(name=user).first()
# if the user exists, put their email into the session; otherwise add a new record to the database
if found_user:
flash(f"Welcome back {user}!")
session['email'] = found_user.email
else:
flash(f"Hello {user}!, Nice to meet you!!")
usr = users(user ,"")
db.session.add(usr)
db.session.commit()
return redirect(url_for('user_page'))
else:
if "user" in session:
flash("Already Logged in!")
return redirect(url_for('user_page'))
return render_template("login.html")
@app.route('/logout')
def logout():
if 'user' in session:
user = session['user']
flash(f"You have been logged out, {user}!", "info")
session.pop('user', None)
session.pop('email', None)
return redirect(url_for('login') )
@app.route('/user', methods=["POST",'GET'])
def user_page():
email = None
if 'user' in session:
user = session['user']
if request.method == "POST":
email = request.form['email'] # the email the user entered
session['email'] = email # also store it in the session
found_user = users.query.filter_by(name=user).first() # look up the matching user record
found_user.email = email # update the user's email with the newly entered value
db.session.commit() # commit after every update so it is saved
flash("Email was saved!!")
else:
if "email" in session:
email = session['email']
return render_template("user_page.html",content=user, email=email)
else:
flash("You are not logged in! ")
return redirect(url_for('login'))
@app.route("/view")
def view():
return render_template("view.html", values=users.query.all())
if __name__ == '__main__':
with app.app_context():
db.create_all()
app.run(debug=True)
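# A minimal smoke test using Flask's test client (not part of the original file); the form
# field names 'nm' and 'email' match the ones read by the views above.
#
# with app.app_context():
#     db.create_all()
# with app.test_client() as client:
#     client.post("/login", data={"nm": "Alice"}, follow_redirects=True)
#     client.post("/user", data={"email": "alice@example.com"}, follow_redirects=True)
#     print(client.get("/view").status_code)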
| JayChen1060920909/Projects | Login-Logout.py | Login-Logout.py | py | 3,056 | python | en | code | 1 | github-code | 6 |
21346415865 |
problems = input()
count_trust = 0
implemented_problems = 0
for i in range(0,int(problems)):
trust_line = input()
trust_line = trust_line.split(' ')
for j in trust_line:
if j=='1':
count_trust+=1
if count_trust >1:
implemented_problems +=1
count_trust = 0
print(implemented_problems)
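# A worked example (not part of the original file). The script reads the number of problems,
# then one line of three 0/1 votes per problem, and counts the problems where at least two
# friends are sure (this appears to solve the Codeforces problem "Team"). For the input
#   3
#   1 1 0
#   1 1 1
#   1 0 0
# the first two lines contain two or more 1s, so the printed answer is 2.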
| YahyaQandel/CodeforcesProblems | Team.py | Team.py | py | 334 | python | en | code | 0 | github-code | 6 |
17953524197 |
# -*- coding: utf-8 -*-
import numpy as np
import torch
import torch.nn as nn
from lichee import plugin
from lichee import config
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'mse_loss')
class MSELoss:
@classmethod
def build(cls, cfg):
return nn.MSELoss()
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'cross_entropy')
class CrossEntropyLoss:
@classmethod
def build(cls, cfg):
return nn.CrossEntropyLoss()
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'neg_log_likelihood')
class NLLLoss:
@classmethod
def build(cls, cfg):
return nn.NLLLoss()
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'binary_cross_entropy')
class BinaryCrossEntropyLoss:
@classmethod
def build(cls, cfg):
return nn.BCEWithLogitsLoss()
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'binary_focal_loss')
class BinaryFocalLoss(nn.Module):
"""
This is an implementation of Focal Loss with smooth label cross entropy support, as proposed in
'Focal Loss for Dense Object Detection. (https://arxiv.org/abs/1708.02002)'
Focal_Loss= -1*alpha*(1-pt)*log(pt)
:param num_class:
:param alpha: (tensor) 3D or 4D the scalar factor for this criterion
:param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more
focus on hard misclassified example
:param reduction: `none`|`mean`|`sum`
:param **kwargs
balance_index: (int) balance class index, should be specific when alpha is float
"""
def __init__(self, alpha=[1.0, 1.0], gamma=2, ignore_index=None, reduction='mean'):
super(BinaryFocalLoss, self).__init__()
if alpha is None:
alpha = [0.25, 0.75]
self.alpha = alpha
self.gamma = gamma
self.smooth = 1e-6
self.ignore_index = ignore_index
self.reduction = reduction
assert self.reduction in ['none', 'mean', 'sum']
if self.alpha is None:
self.alpha = torch.ones(2)
elif isinstance(self.alpha, (list, np.ndarray)):
self.alpha = np.asarray(self.alpha)
self.alpha = np.reshape(self.alpha, (2))
assert self.alpha.shape[0] == 2, \
'the `alpha` shape is not match the number of class'
elif isinstance(self.alpha, (float, int)):
self.alpha = np.asarray([self.alpha, 1.0 - self.alpha], dtype=float).reshape(2)
else:
raise TypeError('{} not supported'.format(type(self.alpha)))
self.one_hot_eye = None
@classmethod
def set_config_default(cls, cfg):
d_c = {'loss_alpha': [1.0, 1.0],
'loss_gamma': 2,
'loss_ignore_index': None,
'loss_reduction': 'mean'}
for key, value in d_c.items():
if key not in cfg.PARAM:
cfg.PARAM[key] = value
@classmethod
def build(cls, cfg):
cls.set_config_default(cfg)
return cls(alpha=cfg.PARAM["loss_alpha"],
gamma=cfg.PARAM["loss_gamma"],
ignore_index=cfg.PARAM["loss_ignore_index"],
reduction=cfg.PARAM["loss_reduction"])
def forward(self, output, target):
prob = torch.sigmoid(output)
prob = torch.clamp(prob, self.smooth, 1.0 - self.smooth)
if self.one_hot_eye == None:
self.one_hot_eye = torch.eye(2).cuda(target.device.index)
target = self.one_hot_eye[target]
pos_mask = (target == 1).float()
neg_mask = (target == 0).float()
pos_loss = -self.alpha[0] * torch.pow(torch.sub(1.0, prob), self.gamma) * torch.log(prob) * pos_mask
neg_loss = -self.alpha[1] * torch.pow(prob, self.gamma) * \
torch.log(torch.sub(1.0, prob)) * neg_mask
neg_loss = neg_loss.sum()
pos_loss = pos_loss.sum()
num_pos = pos_mask.view(pos_mask.size(0), -1).sum()
num_neg = neg_mask.view(neg_mask.size(0), -1).sum()
if num_pos == 0:
loss = neg_loss
else:
loss = pos_loss / num_pos + neg_loss / num_neg
return loss
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'focal_loss')
class FocalLoss(nn.Module):
"""
This is an implementation of Focal Loss with smooth label cross entropy support, as proposed in
'Focal Loss for Dense Object Detection. (https://arxiv.org/abs/1708.02002)'
Focal_Loss= -1*alpha*(1-pt)*log(pt)
:param num_class:
:param alpha: (tensor) 3D or 4D the scalar factor for this criterion
:param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more
focus on hard misclassified example
:param smooth: (float,double) smooth value when cross entropy
:param size_average: (bool, optional) By default, the losses are averaged over each loss element in the batch.
"""
def __init__(self, num_class, alpha=[0.25, 0.75], gamma=2, balance_index=-1, size_average=True):
super(FocalLoss, self).__init__()
self.num_class = num_class
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
self.eps = 1e-6
if isinstance(self.alpha, (list, tuple)):
assert len(self.alpha) == self.num_class
self.alpha = torch.Tensor(list(self.alpha))
elif isinstance(self.alpha, (float, int)):
assert 0 < self.alpha < 1.0, 'alpha should be in `(0,1)`)'
assert balance_index > -1
alpha = torch.ones((self.num_class))
alpha *= 1 - self.alpha
alpha[balance_index] = self.alpha
self.alpha = alpha
elif isinstance(self.alpha, torch.Tensor):
self.alpha = self.alpha
else:
raise TypeError('Not support alpha type, expect `int|float|list|tuple|torch.Tensor`')
@classmethod
def set_config_default(cls, cfg):
d_c = {'loss_alpha': [0.25, 0.75],
'loss_gamma': 2,
'loss_balance_index': -1,
'loss_size_average': True}
for key, value in d_c.items():
if key not in cfg.PARAM:
cfg.PARAM[key] = value
@classmethod
def build(cls, cfg):
cls.set_config_default(cfg)
return cls(num_class=config.get_cfg().DATASET.CONFIG.NUM_CLASS,
alpha=cfg.PARAM["loss_alpha"],
gamma=cfg.PARAM["loss_gamma"],
balance_index=cfg.PARAM["loss_balance_index"],
size_average=cfg.PARAM["loss_size_average"])
def forward(self, logit, target):
if logit.dim() > 2:
# N,C,d1,d2 -> N,C,m (m=d1*d2*...)
logit = logit.view(logit.size(0), logit.size(1), -1)
logit = logit.transpose(1, 2).contiguous() # [N,C,d1*d2..] -> [N,d1*d2..,C]
logit = logit.view(-1, logit.size(-1)) # [N,d1*d2..,C]-> [N*d1*d2..,C]
target = target.view(-1, 1) # [N,d1,d2,...]->[N*d1*d2*...,1]
# -----------legacy way------------
# idx = target.cpu().long()
# one_hot_key = torch.FloatTensor(target.size(0), self.num_class).zero_()
# one_hot_key = one_hot_key.scatter_(1, idx, 1)
# if one_hot_key.device != logit.device:
# one_hot_key = one_hot_key.to(logit.device)
# pt = (one_hot_key * logit).sum(1) + epsilon
# ----------memory saving way--------
pt = logit.gather(1, target).view(-1) + self.eps # avoid apply
logpt = pt.log()
if self.alpha.device != logpt.device:
self.alpha = self.alpha.to(logpt.device)  # keep the class weights on the same device as the log-probabilities
alpha_class = self.alpha.gather(0, target.view(-1))
logpt = alpha_class * logpt
loss = -1 * torch.pow(torch.sub(1.0, pt), self.gamma) * logpt
if self.size_average:
loss = loss.mean()
else:
loss = loss.sum()
return loss
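# A minimal CPU usage sketch for FocalLoss (not part of the original module); the class count
# and tensors are illustrative only. forward() gathers per-class probabilities, so the inputs
# are expected to be post-softmax scores rather than raw logits.
#
# import torch
# loss_fn = FocalLoss(num_class=3, alpha=[0.2, 0.3, 0.5])
# probs = torch.softmax(torch.randn(4, 3), dim=1)
# targets = torch.tensor([0, 2, 1, 2])
# print(loss_fn(probs, targets))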
| Tencent/Lichee | lichee/module/torch/loss/loss.py | loss.py | py | 8,023 | python | en | code | 295 | github-code | 6 |
72067617789 |
import math as ma
import multiprocessing as mp
import time
def first_test(n):
""" test naïf de primalité
retourne True si l'entier est premier, et inversement
n : un entier naturel
"""
for a in range(2, int(ma.sqrt(n) + 1)):
if n % a == 0:
return False
return True
def pi(x):
"""retourne le nombre de premiers inférieurs à x
X : un réel
"""
cpt = 0
for n in range(1, int(x)):
if first_test(n):
cpt += 1
return cpt
def gen_carmichael(t):
"""retourne tous les nombres de Carmichael inférieurs à x
t : un réel
"""
res = []
for x in range(3, int(t), 2): # Carmichael numbers are odd
valid = False
for y in range(2, x):
if ma.gcd(x, y) == 1:
if pow(y, x-1, x) != 1:
valid = False
break
else:
valid = True
if valid and not first_test(x): # Carmichael numbers are composite, so exclude primes
res.append(x)
return res
def worker_proc(x):
valid = False
for y in range(2, x):
if ma.gcd(x, y) == 1:
if pow(y, x - 1, x) != 1:
return
else:
valid = True
if valid and not first_test(x): # exclude primes, which also satisfy the Fermat condition for every base
print(x)
def gen_carmichael_mp(t):
"""retourne tous les nombres de Carmichael inférieurs à t
version multiprocess
t : un réel
"""
pool = mp.Pool(processes=mp.cpu_count())
pool.map(worker_proc, range(3, int(t), 2))
def gen_carmichael_3(k):
""" genère les nombres de Carmicheal de longueur binaire k à partir de trois diviseurs premiers
k : un entier
"""
# t = int(t)
prime = []
for n in range(3, 2 ** k, 2):
if first_test(n):
prime.append(n)
res = []
for i_a, a in enumerate(prime):
for i_b, b in enumerate(prime[:i_a]):
ab = a * b
for c in prime[:i_b]:
# we now have three primes; test whether their product is a Carmichael number
# worker_proc(a * b * c)
tst = ab * c - 1
if tst.bit_length() != k:
continue
if tst % 2 == 0 and tst % (a - 1) == 0 and tst % (b - 1) == 0 and tst % (c - 1) == 0 and a % (b * c) != 0:
res.append(tst + 1)
return sorted(res)
def gen_carmichael_3_all(t):
""" genère un nombre de Carmicheal inférieur t à partir de trois diviseurs premiers
version sans contrainte de taille
"""
t = int(t)
prime = []
for n in range(3, t, 2):
if first_test(n):
prime.append(n)
res = []
for i_a, a in enumerate(prime):
for i_b, b in enumerate(prime[:i_a]):
ab = a * b
for c in prime[:i_b]:
tst = ab * c - 1
if tst % 2 == 0 and tst % (a - 1) == 0 and tst % (b - 1) == 0 and tst % (c - 1) == 0 and a % (b * c) != 0:
res.append(tst + 1)
return sorted(res)
def gen_carmichael_2(p):
"""retourne tous les nombre de carmichael de la forme pqr pour un p donné"""
prime = []
for n in range(3, 2 * p * (p ** 2 + 1), 2):
if n == p:
continue
if first_test(n):
prime.append(n)
res = []
for i_r, r in enumerate(prime):
for q in prime[:i_r]:
tst = p * q * r - 1
if tst % 2 == 0 and tst % (p - 1) == 0 and tst % (q - 1) == 0 and tst % (r - 1) == 0 and r % (p * q) != 0:
res.append(tst + 1)
return sorted(res)
if __name__ == '__main__':
t = time.time()
gen_carmichael_mp(10000)
print("mt : ", str(time.time() - t))
t = time.time()
print(gen_carmichael(64000))
print("naif : ", str(time.time() - t))
t = time.time()
print(gen_carmichael_3_all(100))
print("3 : ", str(time.time() - t))
| BasileLewan/ProjetCOMPLEX | Ex2.py | Ex2.py | py | 3,830 | python | fr | code | 0 | github-code | 6 |
70111562749 |
import firstscript as sense
import random
import time
def get_random_time():
random_time_frame = random.randrange(7,25)
return random_time_frame
def get_random():
numbers = [1,2,3,4,5,6,7,8,9]
y = get_random_time()
for i in range(y):
sense.display_letter(str(random.choice(numbers)))
time.sleep(.5)
sense.clear_hat()
sense.display_letter(str(random.choice(numbers)),[255,0,0])
if __name__ == '__main__':
get_random()
| cthacker-udel/Raspberry-Pi-Scripts | py/randomnumber.py | randomnumber.py | py | 477 | python | en | code | 7 | github-code | 6 |
38520392642 |
#!/usr/bin/python
"""
This is the code to accompany the Lesson 1 (Naive Bayes) mini-project.
Use a Naive Bayes Classifier to identify emails by their authors
authors and labels:
Sara has label 0
Chris has label 1
"""
import sys
import time
sys.path.append("../tools/")
from email_preprocess import preprocess
#imports
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
#########################################################
### your code goes here ###
sample_size_list = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
##Loop to change sample Size
for j in sample_size_list:
features_train, features_test, labels_train, labels_test = preprocess("../tools/word_data_unix.pkl","../tools/email_authors.pkl",j)
loop = [0,1,2,3,4,5,6,7,8,9]
print("Test sample Size:",features_train.size)
##Loop to change the var_smoothing
for i in loop:
num=1/(10)**i
gnb = GaussianNB(var_smoothing=num)
time0=time.time()
pred = gnb.fit(features_train, labels_train).predict(features_test)
time1=time.time()
acc=accuracy_score(labels_test,pred)
print("Test sample_Size: ",j," Accuracy for ",num,": ", acc,"Ellapsed time: ",time1-time0)
#########################################################
| Vkadel/machineLearningNB | nb_author_id.py | nb_author_id.py | py | 1,524 | python | en | code | 0 | github-code | 6 |
42940796937 |
from typing import List, Tuple
import networkx as nx
import numpy as np
from matplotlib import pyplot as plt
import time
import copy
from node import Node
def get_graph(node: Node) -> Tuple[nx.Graph, List, List]:
board_size = node.state.shape[0]
G = nx.grid_2d_graph(board_size, board_size)
diagonals = []
for x,y in G:
x2 = x-1
y2 = y+1
if y2 >= board_size or x2 < 0:
continue
edge = ((x, y), (x2,y2))
diagonals.append(edge)
G.add_edges_from(diagonals)
pos = {}
colour_map = []
theta = -(1/4) * np.pi
costheta = np.cos(theta)
sintheta = np.sin(theta)
rotation_matrix = np.array([
[costheta, -sintheta],
[sintheta, costheta]
])
for x,y in G:
coords = (x,y)
pos[coords] = np.dot(rotation_matrix, (y,-x))
if node.state[coords] == 1:
colour_map.append("red")
elif node.state[coords] == -1:
colour_map.append("blue")
else:
colour_map.append("grey")
return G, pos, colour_map
def visualize_hex_node_state(node: Node, done: bool=False) -> None:
G, pos, colour_map = get_graph(node)
nx.draw(G, pos=pos,
node_color=colour_map,
with_labels=True,
node_size=600)
plt.draw()
plt.pause(0.001)
if done:
plt.close()
if __name__ == "__main__":
plt.figure(figsize=(5,5))
plt.ion()
plt.show()
test_state = np.zeros(shape=(7,7))
test_state[0, 1] = 1
test_node = Node(state=test_state)
visualize_hex_node_state(test_node)
new_node = copy.copy(test_node)
new_node.state[0,2] = -1
visualize_hex_node_state(new_node)
| Mathipe98/IT3105-Projects | Project 2/visualizer.py | visualizer.py | py | 1,707 | python | en | code | 0 | github-code | 6 |
858399284 |
from __future__ import division
from PyQt4 import QtCore, QtGui
from vistrails.core.inspector import PipelineInspector
from vistrails.gui.common_widgets import QToolWindowInterface
from vistrails.gui.pipeline_view import QPipelineView
from vistrails.gui.theme import CurrentTheme
################################################################################
class QAnnotatedPipelineView(QPipelineView, QToolWindowInterface):
"""
QAnnotatedPipelineView subclass QPipelineView to perform some overlay
marking on a pipeline view
"""
def __init__(self, parent=None):
""" QPipelineView(parent: QWidget) -> QPipelineView
Initialize the graphics view and its properties
"""
QPipelineView.__init__(self, parent)
self.setWindowTitle('Annotated Pipeline')
self.inspector = PipelineInspector()
def sizeHint(self):
""" sizeHint() -> QSize
Prefer the view not so large
"""
return QtCore.QSize(256, 256)
def paintEvent(self, event):
""" paintEvent(event: QPaintEvent) -> None
Paint an overlay annotation on spreadsheet cell modules
"""
QPipelineView.paintEvent(self, event)
# super(QAnnotatedPipelineView, self).paintEvent(event)
if self.scene():
painter = QtGui.QPainter(self.viewport())
for mId, annotatedId in \
self.inspector.annotated_modules.iteritems():
if mId not in self.scene().modules:
# faulty annotated_modules entry
continue
item = self.scene().modules[mId]
br = item.sceneBoundingRect()
rect = QtCore.QRect(self.mapFromScene(br.topLeft()),
self.mapFromScene(br.bottomRight()))
QAnnotatedPipelineView.drawId(painter, rect, annotatedId)
painter.end()
def updateAnnotatedIds(self, pipeline):
""" updateAnnotatedIds(pipeline: Pipeline) -> None
Re-inspect the pipeline to get annotated ids
"""
if pipeline and self.scene():
self.inspector.inspect_ambiguous_modules(pipeline)
self.scene().fitToAllViews(True)
@staticmethod
def drawId(painter, rect, id, align=QtCore.Qt.AlignCenter):
""" drawId(painter: QPainter, rect: QRect, id: int,
align: QtCore.Qt.Align) -> None
Draw the rounded id number on a rectangular area
"""
painter.save()
painter.setRenderHints(QtGui.QPainter.Antialiasing)
painter.setPen(CurrentTheme.ANNOTATED_ID_BRUSH.color())
painter.setBrush(CurrentTheme.ANNOTATED_ID_BRUSH)
font = QtGui.QFont()
font.setStyleStrategy(QtGui.QFont.ForceOutline)
font.setBold(True)
painter.setFont(font)
fm = QtGui.QFontMetrics(font)
size = fm.size(QtCore.Qt.TextSingleLine, str(id))
size = max(size.width(), size.height())
x = rect.left()
if align & QtCore.Qt.AlignHCenter:
x = rect.left() + rect.width()//2-size//2
if align & QtCore.Qt.AlignRight:
x = rect.left() + rect.width()-size
y = rect.top()
if align & QtCore.Qt.AlignVCenter:
y = rect.top() + rect.height()//2-size//2
if align & QtCore.Qt.AlignBottom:
y = rect.top() + rect.height()-size
newRect = QtCore.QRect(x, y, size, size)
painter.drawEllipse(newRect)
painter.setPen(CurrentTheme.ANNOTATED_ID_PEN)
painter.drawText(newRect, QtCore.Qt.AlignCenter, str(id))
painter.restore()
| VisTrails/VisTrails | vistrails/gui/paramexplore/pe_pipeline.py | pe_pipeline.py | py | 3,719 | python | en | code | 100 | github-code | 6 |
28220979750 |
#Import Necessary Packages
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns
import ruptures as rpt
from statistics import stdev
import pandas as pd
def load_rms(path, sect, ref):
raw_string = open('../../' + path + '/rmsd_' + sect + '_ref_' + ref + '.txt').readlines()
#Convert data to fload
raw = np.zeros(len(raw_string))
for i in range(len(raw_string)):
raw[i] = float(raw_string[i])*10
return raw
def plot_compare(RMSD_mean, RMSD_err, Label, sect, n, ref):
rmsd_n = RMSD_mean[:,n]
rmsd_err_n = RMSD_err[:,n]
section = sect[n]
# bar_color = ['gray', 'gray', 'pink', 'blue', 'pink', 'red', 'red']
num = np.linspace(1, len(Label)+1, num = len(Label))
fig = plt.figure(figsize=(18,10))
ax1 = fig.add_subplot(111)
ax1.set_title("RMSD for " + section + ' to ' + ref)
ax1.set_ylabel(r'RMSD($\AA$)')
ax1.bar(num, rmsd_n)
plt.xticks(num, Label, fontsize=14)
plt.errorbar(num, rmsd_n, yerr= rmsd_err_n, fmt='o', color='black')
fig.savefig('RMSD_compare_' + section + '_' + ref + '.png')
plt.close(fig)
def plot_kernel_mut(df, sect_name, sect_file, xmin, xmax):
ax = sns.kdeplot(data = df, fill=True, alpha=0.5, common_grid = True)
plt.setp(ax.get_legend().get_texts(), fontsize='12') # for legend text
plt.xlabel(r'RMSD($\AA$)', fontsize = 14)
plt.xlim(xmin, xmax)
plt.xticks(fontsize = 13)
plt.yticks(fontsize = 13)
plt.ylabel(r'Normalized Density', fontsize = 14)
plt.title(str(sect_name) + r' RMSD Relative to WT Closed', fontsize = 15)
plt.savefig('mutate_RMSD_' + str(sect_file) + '.png')
plt.close()
def plot_kernel_cmpr_lig(apo_df, AD_df, BBR_df, mut, sect, n):
df = pd.concat([apo_df, AD_df, BBR_df])
sns.kdeplot(data = df, fill=True, alpha=0.5, common_norm = True, common_grid = True)
plt.xlabel(r'RMSD($\AA$)', fontsize = 14)
plt.ylabel(r'Normalized Density', fontsize = 14)
plt.title(sect + ' RMSD Compared to WT Closed', fontsize = 15)
plt.savefig('mutate_RMSD_' + sect + '_' + mut + '.png')
plt.close()
def rmsd_sect(sect, file_path_close, file_path_close_AD, file_path_close_BBR, ref, n):
rmsd_1sug = load_rms(file_path_close[0], sect, ref[n])
rmsd_apo = load_rms(file_path_close[1], sect, ref[n])
rmsd_L192F = load_rms(file_path_close[2], sect, ref[n])
rmsd_E276F = load_rms(file_path_close[3], sect, ref[n])
rmsd_F280Y = load_rms(file_path_close[4], sect, ref[n])
rmsd_L195F = load_rms(file_path_close[5], sect, ref[n])
rmsd_F196A = load_rms(file_path_close[6], sect, ref[n])
rmsd_V287T = load_rms(file_path_close[7], sect, ref[n])
rmsd_L192F_AD = load_rms(file_path_close_AD[0], sect, ref[n])
rmsd_L192F_BBR = load_rms(file_path_close_BBR[0], sect, ref[n])
rmsd_E276F_AD = load_rms(file_path_close_AD[1], sect, ref[n])
rmsd_E276F_BBR = load_rms(file_path_close_BBR[1], sect, ref[n])
rmsd_F280Y_AD = load_rms(file_path_close_AD[2], sect, ref[n])
rmsd_F280Y_BBR = load_rms(file_path_close_BBR[2], sect, ref[n])
rmsd_L195F_AD = load_rms(file_path_close_AD[3], sect, ref[n])
rmsd_L195F_BBR = load_rms(file_path_close_BBR[3], sect, ref[n])
rmsd_F196A_AD = load_rms(file_path_close_AD[4], sect, ref[n])
rmsd_F196A_BBR = load_rms(file_path_close_BBR[4], sect, ref[n])
rmsd_V287T_AD = load_rms(file_path_close_AD[4], sect, ref[n])
rmsd_V287T_BBR = load_rms(file_path_close_BBR[4], sect, ref[n])
return rmsd_1sug, rmsd_apo, rmsd_L192F, rmsd_E276F, rmsd_F280Y, rmsd_L195F, rmsd_F196A, rmsd_V287T, rmsd_L192F_AD, rmsd_E276F_AD, rmsd_F280Y_AD, rmsd_L195F_AD, rmsd_F196A_AD, rmsd_V287T_AD, rmsd_L192F_BBR, rmsd_E276F_BBR, rmsd_F280Y_BBR, rmsd_L195F_BBR, rmsd_F196A_BBR, rmsd_V287T_BBR
#File paths for all input files
file_path = ['../Apo_dis/analysis', 'L192F/Apo/analysis', 'E276F/Apo/analysis', 'F280Y/Apo/analysis', 'L195F/Apo/analysis', 'F196A/Apo/analysis', 'V287T/Apo/analysis'] #Indices to rank in order of closest activity to WT to Furthest
file_path_close = ['../Apo_1SUG/analysis/1sug', '../Apo_dis/analysis', 'L192F/Apo/analysis', 'E276F/Apo/analysis', 'F280Y/Apo/analysis', 'L195F/Apo/analysis', 'F196A/Apo/analysis', 'V287T/Apo/analysis']
file_path_close_AD = ['L192F/AD/analysis', 'E276F/AD/analysis', 'F280Y/AD/analysis', 'L195F/AD/analysis', 'F196A/AD/analysis', 'V287T/AD/analysis']
file_path_close_BBR = ['L192F/BBR/analysis', 'E276F/BBR/analysis', 'F280Y/BBR/analysis', 'L195F/BBR/analysis', 'F196A/BBR/analysis', 'V287T/BBR/analysis']
sections = ['WPD', 'WPD_a3', 'SBL', 'beg', 'P', 'CYS', 'a3', 'a3_top', 'a4', 'a5', 'a6', 'a6_bot', 'a7', 'Q']
ref = ['open', 'closed', 'self', 'F196A', 'V287T']
#open all files
RMSD_mean = np.zeros((len(file_path), len(sections))) #Mean for reference open
RMSD_err = np.zeros((len(file_path), len(sections))) #SEM for reference open
RMSD_mean_close = np.zeros((len(file_path_close), len(sections))) #Mean for reference closed
RMSD_err_close = np.zeros((len(file_path_close), len(sections))) #SEM for reference closed
RMSD_mean_close_AD = np.zeros((len(file_path_close_AD), len(sections))) #Mean for reference closed
RMSD_err_close_AD = np.zeros((len(file_path_close_AD), len(sections))) #SEM for reference closed
RMSD_mean_close_BBR = np.zeros((len(file_path_close_BBR), len(sections))) #Mean for reference closed
RMSD_err_close_BBR = np.zeros((len(file_path_close_BBR), len(sections))) #SEM for reference closed
#Save all rmsd values for a3_top, a4_top, and a6 helix
rmsd_a3_1sug, rmsd_a3_apo, rmsd_a3_L192F, rmsd_a3_E276F, rmsd_a3_F280Y, rmsd_a3_L195F, rmsd_a3_F196A, rmsd_a3_V287T, rmsd_a3_L192F_AD, rmsd_a3_E276F_AD, rmsd_a3_F280Y_AD, rmsd_a3_L195F_AD, rmsd_a3_F196A_AD, rmsd_a3_V287T_AD, rmsd_a3_L192F_BBR, rmsd_a3_E276F_BBR, rmsd_a3_F280Y_BBR, rmsd_a3_L195F_BBR, rmsd_a3_F196A_BBR, rmsd_a3_V287T_BBR = rmsd_sect('a3', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_a3_top_1sug, rmsd_a3_top_apo, rmsd_a3_top_L192F, rmsd_a3_top_E276F, rmsd_a3_top_F280Y, rmsd_a3_top_L195F, rmsd_a3_top_F196A, rmsd_a3_top_V287T, rmsd_a3_top_L192F_AD, rmsd_a3_top_E276F_AD, rmsd_a3_top_F280Y_AD, rmsd_a3_top_L195F_AD, rmsd_a3_top_F196A_AD, rmsd_a3_top_V287T_AD, rmsd_a3_top_L192F_BBR, rmsd_a3_top_E276F_BBR, rmsd_a3_top_F280Y_BBR, rmsd_a3_top_L195F_BBR, rmsd_a3_top_F196A_BBR, rmsd_a3_top_V287T_BBR = rmsd_sect('a3_top', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_a4_1sug, rmsd_a4_apo, rmsd_a4_L192F, rmsd_a4_E276F, rmsd_a4_F280Y, rmsd_a4_L195F, rmsd_a4_F196A, rmsd_a4_V287T, rmsd_a4_L192F_AD, rmsd_a4_E276F_AD, rmsd_a4_F280Y_AD, rmsd_a4_L195F_AD, rmsd_a4_F196A_AD, rmsd_a4_V287T_AD, rmsd_a4_L192F_BBR, rmsd_a4_E276F_BBR, rmsd_a4_F280Y_BBR, rmsd_a4_L195F_BBR, rmsd_a4_F196A_BBR, rmsd_a4_V287T_BBR = rmsd_sect('a4', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_a6_1sug, rmsd_a6_apo, rmsd_a6_L192F, rmsd_a6_E276F, rmsd_a6_F280Y, rmsd_a6_L195F, rmsd_a6_F196A, rmsd_a6_V287T, rmsd_a6_L192F_AD, rmsd_a6_E276F_AD, rmsd_a6_F280Y_AD, rmsd_a6_L195F_AD, rmsd_a6_F196A_AD, rmsd_a6_V287T_AD, rmsd_a6_L192F_BBR, rmsd_a6_E276F_BBR, rmsd_a6_F280Y_BBR, rmsd_a6_L195F_BBR, rmsd_a6_F196A_BBR, rmsd_a6_V287T_BBR = rmsd_sect('a6', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_a6_bot_1sug, rmsd_a6_bot_apo, rmsd_a6_bot_L192F, rmsd_a6_bot_E276F, rmsd_a6_bot_F280Y, rmsd_a6_bot_L195F, rmsd_a6_bot_F196A, rmsd_a6_bot_V287T, rmsd_a6_bot_L192F_AD, rmsd_a6_bot_E276F_AD, rmsd_a6_bot_F280Y_AD, rmsd_a6_bot_L195F_AD, rmsd_a6_bot_F196A_AD, rmsd_a6_bot_V287T_AD, rmsd_a6_bot_L192F_BBR, rmsd_a6_bot_E276F_BBR, rmsd_a6_bot_F280Y_BBR, rmsd_a6_bot_L195F_BBR, rmsd_a6_bot_F196A_BBR, rmsd_a6_bot_V287T_BBR = rmsd_sect('a6_bot', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_CYS_1sug, rmsd_CYS_apo, rmsd_CYS_L192F, rmsd_CYS_E276F, rmsd_CYS_F280Y, rmsd_CYS_L195F, rmsd_CYS_F196A, rmsd_CYS_V287T, rmsd_CYS_L192F_AD, rmsd_CYS_E276F_AD, rmsd_CYS_F280Y_AD, rmsd_CYS_L195F_AD, rmsd_CYS_F196A_AD, rmsd_CYS_V287T_AD, rmsd_CYS_L192F_BBR, rmsd_CYS_E276F_BBR, rmsd_CYS_F280Y_BBR, rmsd_CYS_L195F_BBR, rmsd_CYS_F196A_BBR, rmsd_CYS_V287T_BBR = rmsd_sect('CYS', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_beg_1sug, rmsd_beg_apo, rmsd_beg_L192F, rmsd_beg_E276F, rmsd_beg_F280Y, rmsd_beg_L195F, rmsd_beg_F196A, rmsd_beg_V287T, rmsd_beg_L192F_AD, rmsd_beg_E276F_AD, rmsd_beg_F280Y_AD, rmsd_beg_L195F_AD, rmsd_beg_F196A_AD, rmsd_beg_V287T_AD, rmsd_beg_L192F_BBR, rmsd_beg_E276F_BBR, rmsd_beg_F280Y_BBR, rmsd_beg_L195F_BBR, rmsd_beg_F196A_BBR, rmsd_beg_V287T_BBR = rmsd_sect('beg', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
for i in range(len(file_path_close)):
for j in range(len(sections)):
#Load Data for reference open
rmsd_Apo = load_rms(file_path_close[i], sections[j], ref[1])
#Mean and SEM for each trajectory
RMSD_mean_close[i][j] = np.mean(rmsd_Apo)
RMSD_err_close[i][j] = stats.sem(rmsd_Apo)
for i in range(len(file_path)):
#Load Data for reference open
rmsd = load_rms(file_path[i], sections[j], ref[0])
#Mean and SEM for each trajectory
RMSD_mean[i][j] = np.mean(rmsd)
RMSD_err[i][j] = stats.sem(rmsd)
for i in range(len(file_path_close_AD)):
#Load Data for reference open
rmsd_AD = load_rms(file_path_close_AD[i], sections[j], ref[1])
RMSD_mean_close_AD[i][j] = np.mean(rmsd_AD)
RMSD_err_close_AD[i][j] = stats.sem(rmsd_AD)
rmsd_BBR = load_rms(file_path_close_BBR[i], sections[j], ref[1])
RMSD_mean_close_BBR[i][j] = np.mean(rmsd_BBR)
RMSD_err_close_BBR[i][j] = stats.sem(rmsd_BBR)
#Name Labels
Label = ['WT', 'L192F', 'E276F', 'F280Y', 'L195F', 'F196A', 'V287T']
Label_close = ['WT Close', 'WT Open', 'L192F', 'E276F', 'F280Y', 'L195F', 'F196A', 'V287T']
Labels_mut = ['L192F', 'E276F', 'F280Y', 'L195F', 'F196A', 'V287T']
#Plot all compared to WT Open
for i in range(len(sections)):
plot_compare(RMSD_mean, RMSD_err, Label, sections, i, ref[0])
plot_compare(RMSD_mean_close, RMSD_err_close, Label_close, sections, i, ref[1])
#Determine % difference from WT
RMSD_diff = np.zeros((len(sections), len(Labels_mut)))
for i in range(1, len(Label)):
n = i-1
for j in range(len(sections)):
WT = RMSD_mean[0][j]
Mut = RMSD_mean[i][j]
RMSD_diff[j][n] = ((Mut-WT)/((Mut+WT)/2)) * 100
#Plot table comparing residue interactions to WT
ax = plt.figure(figsize=(12, 6), frameon=False) # no visible frame
ax = sns.heatmap(RMSD_diff, annot=False, cmap = 'jet', cbar = True, cbar_kws={'label': 'Percentage Difference from WT'}, vmin = 0, vmax = 150, xticklabels = Labels_mut, yticklabels = sections)
#ax.add_artist(lines.Line2D([0, 20], [7, 7], color = 'black', linestyle= '--', linewidth = 4))
plt.title('Section RMSD Compared to WT')
plt.savefig('mutate_RMSD_Apo.png')
plt.close()
RMSD_mean_mut = np.zeros((len(Label_close), len(sections))) #Mean for reference open
RMSD_err_mut = np.zeros((len(Label_close), len(sections))) #SEM for reference open
#Plot self and two references
for i in [0, 2, 3, 4]:
for j in range(len(sections)):
#Load Data
RMSD_mean_mut[0][j] = RMSD_mean_close[0][j]
RMSD_err_mut[0][j] = RMSD_err_close[0][j]
for k in range(1, len(Label_close)):
rmsd = load_rms(file_path_close[k], sections[j], ref[i])
RMSD_mean_mut[k][j] = np.mean(rmsd)
RMSD_err_mut[k][j] = stats.sem(rmsd)
plot_compare(RMSD_mean_mut, RMSD_err_mut, Label_close, sections, j, ref[i])
#Plot Kernel Density Estimate plots
#Compare a3_top for L192F, E276F, L195F, V287T
a3_top_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_a3_top_apo})
a3_top_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_a3_top_1sug})
a3_top_L192F_df = pd.DataFrame({'L192F': rmsd_a3_top_L192F})
a3_top_L195F_df = pd.DataFrame({'L195F': rmsd_a3_top_L195F})
a3_top_F280Y_df = pd.DataFrame({'F280Y': rmsd_a3_top_F280Y})
a3_top_E276F_df = pd.DataFrame({'E276F': rmsd_a3_top_E276F})
a3_top_F196A_df = pd.DataFrame({'F196A': rmsd_a3_top_F196A})
a3_top_V287T_df = pd.DataFrame({'V287T': rmsd_a3_top_V287T})
a3_top_L192F_AD_df = pd.DataFrame({'L192F AD': rmsd_a3_top_L192F_AD})
a3_top_L195F_AD_df = pd.DataFrame({'L195F AD': rmsd_a3_top_L195F_AD})
a3_top_F280Y_AD_df = pd.DataFrame({'F280Y AD': rmsd_a3_top_F280Y_AD})
a3_top_E276F_AD_df = pd.DataFrame({'E276F AD': rmsd_a3_top_E276F_AD})
a3_top_F196A_AD_df = pd.DataFrame({'F196A AD': rmsd_a3_top_F196A_AD})
a3_top_V287T_AD_df = pd.DataFrame({'V287T AD': rmsd_a3_top_V287T_AD})
a3_top_L192F_BBR_df = pd.DataFrame({'L192F BBR': rmsd_a3_top_L192F_BBR})
a3_top_L195F_BBR_df = pd.DataFrame({'L195F BBR': rmsd_a3_top_L195F_BBR})
a3_top_F280Y_BBR_df = pd.DataFrame({'F280Y BBR': rmsd_a3_top_F280Y_BBR})
a3_top_E276F_BBR_df = pd.DataFrame({'E276F BBR': rmsd_a3_top_E276F_BBR})
a3_top_F196A_BBR_df = pd.DataFrame({'F196A BBR': rmsd_a3_top_F196A_BBR})
a3_top_V287T_BBR_df = pd.DataFrame({'V287T BBR': rmsd_a3_top_V287T_BBR})
df = pd.concat([a3_top_Apo_open_df, a3_top_Apo_close_df, a3_top_L192F_df, a3_top_E276F_df, a3_top_V287T_df, a3_top_F196A_df, a3_top_F280Y_df, a3_top_L195F_df])
plot_kernel_mut(df, r'Top of $\alpha$3', 'a3_top_all', 0, 2)
df = pd.concat([a3_top_L192F_df, a3_top_E276F_df, a3_top_V287T_df, a3_top_F196A_df, a3_top_F280Y_df, a3_top_L195F_df])
plot_kernel_mut(df, r'Top of $\alpha$3', 'a3_top_mut_all', 0, 2)
df = pd.concat([a3_top_Apo_open_df, a3_top_Apo_close_df, a3_top_V287T_df, a3_top_F280Y_df])
plot_kernel_mut(df, r'Top of $\alpha$3', 'a3_top_extr', 0, 2)
plot_kernel_cmpr_lig(a3_top_L192F_df, a3_top_L192F_AD_df, a3_top_L192F_BBR_df, 'L192F', sections[7], 7)
plot_kernel_cmpr_lig(a3_top_L195F_df, a3_top_L195F_AD_df, a3_top_L195F_BBR_df, 'L195F', sections[7], 7)
plot_kernel_cmpr_lig(a3_top_E276F_df, a3_top_E276F_AD_df, a3_top_E276F_BBR_df, 'E276F', sections[7], 7)
plot_kernel_cmpr_lig(a3_top_V287T_df, a3_top_V287T_AD_df, a3_top_V287T_BBR_df, 'V287T', sections[7], 7)
#Compare a3_top for L192F, E276F, L195F, V287T
a3_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_a3_apo})
a3_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_a3_1sug})
a3_L192F_df = pd.DataFrame({'L192F': rmsd_a3_L192F})
a3_L195F_df = pd.DataFrame({'L195F': rmsd_a3_L195F})
a3_F280Y_df = pd.DataFrame({'F280Y': rmsd_a3_F280Y})
a3_E276F_df = pd.DataFrame({'E276F': rmsd_a3_E276F})
a3_F196A_df = pd.DataFrame({'F196A': rmsd_a3_F196A})
a3_V287T_df = pd.DataFrame({'V287T': rmsd_a3_V287T})
a3_L192F_AD_df = pd.DataFrame({'L192F AD': rmsd_a3_L192F_AD})
a3_L195F_AD_df = pd.DataFrame({'L195F AD': rmsd_a3_L195F_AD})
a3_F280Y_AD_df = pd.DataFrame({'F280Y AD': rmsd_a3_F280Y_AD})
a3_E276F_AD_df = pd.DataFrame({'E276F AD': rmsd_a3_E276F_AD})
a3_F196A_AD_df = pd.DataFrame({'F196A AD': rmsd_a3_F196A_AD})
a3_V287T_AD_df = pd.DataFrame({'V287T AD': rmsd_a3_V287T_AD})
a3_L192F_BBR_df = pd.DataFrame({'L192F BBR': rmsd_a3_L192F_BBR})
a3_L195F_BBR_df = pd.DataFrame({'L195F BBR': rmsd_a3_L195F_BBR})
a3_F280Y_BBR_df = pd.DataFrame({'F280Y BBR': rmsd_a3_F280Y_BBR})
a3_E276F_BBR_df = pd.DataFrame({'E276F BBR': rmsd_a3_E276F_BBR})
a3_F196A_BBR_df = pd.DataFrame({'F196A BBR': rmsd_a3_F196A_BBR})
a3_V287T_BBR_df = pd.DataFrame({'V287T BBR': rmsd_a3_V287T_BBR})
df = pd.concat([a3_Apo_open_df, a3_Apo_close_df, a3_L192F_df, a3_E276F_df, a3_V287T_df, a3_F196A_df, a3_F280Y_df, a3_L195F_df])
plot_kernel_mut(df, r'$\alpha$3', 'a3_all', 0, 2)
df = pd.concat([a3_L192F_df, a3_E276F_df, a3_V287T_df, a3_F196A_df, a3_F280Y_df, a3_L195F_df])
plot_kernel_mut(df, r'$\alpha$3', 'a3_mut_all', 0, 2)
df = pd.concat([a3_Apo_open_df, a3_Apo_close_df, a3_V287T_df, a3_F280Y_df])
plot_kernel_mut(df, r'$\alpha$3', 'a3_mut_extr', 0, 2)
plot_kernel_cmpr_lig(a3_L192F_df, a3_L192F_AD_df, a3_L192F_BBR_df, 'L192F', sections[6], 6)
plot_kernel_cmpr_lig(a3_L195F_df, a3_L195F_AD_df, a3_L195F_BBR_df, 'L195F', sections[6], 6)
plot_kernel_cmpr_lig(a3_E276F_df, a3_E276F_AD_df, a3_E276F_BBR_df, 'E276F', sections[6], 6)
plot_kernel_cmpr_lig(a3_V287T_df, a3_V287T_AD_df, a3_V287T_BBR_df, 'V287T', sections[6], 6)
#Compare a4 for L192F, E276F, L195F, V287T
a4_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_a4_apo})
a4_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_a4_1sug})
a4_L192F_df = pd.DataFrame({'L192F': rmsd_a4_L192F})
a4_L195F_df = pd.DataFrame({'L195F': rmsd_a4_L195F})
a4_F280Y_df = pd.DataFrame({'F280Y': rmsd_a4_F280Y})
a4_E276F_df = pd.DataFrame({'E276F': rmsd_a4_E276F})
a4_F196A_df = pd.DataFrame({'F196A': rmsd_a4_F196A})
a4_V287T_df = pd.DataFrame({'V287T': rmsd_a4_V287T})
a4_L192F_AD_df = pd.DataFrame({'L192F AD': rmsd_a4_L192F_AD})
a4_L195F_AD_df = pd.DataFrame({'L195F AD': rmsd_a4_L195F_AD})
a4_F280Y_AD_df = pd.DataFrame({'F280Y AD': rmsd_a4_F280Y_AD})
a4_E276F_AD_df = pd.DataFrame({'E276F AD': rmsd_a4_E276F_AD})
a4_F196A_AD_df = pd.DataFrame({'F196A AD': rmsd_a4_F196A_AD})
a4_V287T_AD_df = pd.DataFrame({'V287T AD': rmsd_a4_V287T_AD})
a4_L192F_BBR_df = pd.DataFrame({'L192F BBR': rmsd_a4_L192F_BBR})
a4_L195F_BBR_df = pd.DataFrame({'L195F BBR': rmsd_a4_L195F_BBR})
a4_F280Y_BBR_df = pd.DataFrame({'F280Y BBR': rmsd_a4_F280Y_BBR})
a4_E276F_BBR_df = pd.DataFrame({'E276F BBR': rmsd_a4_E276F_BBR})
a4_F196A_BBR_df = pd.DataFrame({'F196A BBR': rmsd_a4_F196A_BBR})
a4_V287T_BBR_df = pd.DataFrame({'V287T BBR': rmsd_a4_V287T_BBR})
df = pd.concat([a4_Apo_open_df, a4_Apo_close_df, a4_L192F_df, a4_E276F_df, a4_V287T_df, a4_F196A_df, a4_F280Y_df, a4_L195F_df])
plot_kernel_mut(df, r'$\alpha$4', 'a4_all', 0, 1.5)
df = pd.concat([a4_L192F_df, a4_E276F_df, a4_V287T_df, a4_F196A_df, a4_F280Y_df, a4_L195F_df])
plot_kernel_mut(df, r'$\alpha$4', 'a4_mut_all',0, 1.5)
df = pd.concat([a4_Apo_open_df, a4_Apo_close_df, a4_V287T_df, a4_F196A_df, a4_F280Y_df])
plot_kernel_mut(df, r'$\alpha$4', 'a4', 0, 1.5)
plot_kernel_cmpr_lig(a4_F196A_df, a4_F196A_AD_df, a4_F196A_BBR_df, 'F196A', sections[8], 8)
plot_kernel_cmpr_lig(a4_F280Y_df, a4_F280Y_AD_df, a4_F280Y_BBR_df, 'F280Y', sections[8], 8)
#a4_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_a4_apo_rapo})
#a4_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_a4_1sug_rapo})
#a4_F196A_df = pd.DataFrame({'F196A': rmsd_a4_F196A_rapo})
#a4_F196A_AD_df = pd.DataFrame({'F196A AD': rmsd_a4_F196A_AD_rapo})
#a4_F196A_BBR_df = pd.DataFrame({'F196A BBR': rmsd_a4_F196A_BBR_rapo})
#df = pd.concat([a4_Apo_open_df, a4_Apo_close_df, a4_F196A_df, a4_F196A_AD_df, a4_F196A_BBR_df])
#ax = plt.figure(figsize=(12, 6), frameon=False) # no visible frame
#sns.kdeplot(data = df, fill=True, alpha=0.5, common_norm = True, common_grid = True)
#plt.xlabel(r'RMSD($\AA$)')
#plt.ylabel(r'Normalized Density')
#plt.title(r'$\alpha$-4 RMSD Compared to Apo F196A')
#plt.savefig('mutate_RMSD_a4_ref_F196A.png')
#plt.close()
#a6 comparison
a6_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_a6_apo})
a6_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_a6_1sug})
a6_L192F_df = pd.DataFrame({'L192F': rmsd_a6_L192F})
a6_L195F_df = pd.DataFrame({'L195F': rmsd_a6_L195F})
a6_F280Y_df = pd.DataFrame({'F280Y': rmsd_a6_F280Y})
a6_E276F_df = pd.DataFrame({'E276F': rmsd_a6_E276F})
a6_F196A_df = pd.DataFrame({'F196A': rmsd_a6_F196A})
a6_V287T_df = pd.DataFrame({'V287T': rmsd_a6_V287T})
a6_L192F_AD_df = pd.DataFrame({'L192F AD': rmsd_a6_L192F_AD})
a6_L195F_AD_df = pd.DataFrame({'L195F AD': rmsd_a6_L195F_AD})
a6_F280Y_AD_df = pd.DataFrame({'F280Y AD': rmsd_a6_F280Y_AD})
a6_E276F_AD_df = pd.DataFrame({'E276F AD': rmsd_a6_E276F_AD})
a6_F196A_AD_df = pd.DataFrame({'F196A AD': rmsd_a6_F196A_AD})
a6_V287T_AD_df = pd.DataFrame({'V287T AD': rmsd_a6_V287T_AD})
a6_L192F_BBR_df = pd.DataFrame({'L192F BBR': rmsd_a6_L192F_BBR})
a6_L195F_BBR_df = pd.DataFrame({'L195F BBR': rmsd_a6_L195F_BBR})
a6_F280Y_BBR_df = pd.DataFrame({'F280Y BBR': rmsd_a6_F280Y_BBR})
a6_E276F_BBR_df = pd.DataFrame({'E276F BBR': rmsd_a6_E276F_BBR})
a6_F196A_BBR_df = pd.DataFrame({'F196A BBR': rmsd_a6_F196A_BBR})
a6_V287T_BBR_df = pd.DataFrame({'V287T BBR': rmsd_a6_V287T_BBR})
df = pd.concat([a6_Apo_open_df, a6_Apo_close_df, a6_L192F_df, a6_E276F_df, a6_V287T_df, a6_F196A_df, a6_F280Y_df, a6_L195F_df])
plot_kernel_mut(df, r'$\alpha$6', 'a6_all', 0, 2)
df = pd.concat([a6_L192F_df, a6_E276F_df, a6_V287T_df, a6_F196A_df, a6_F280Y_df, a6_L195F_df])
plot_kernel_mut(df, r'$\alpha$6', 'a6_mut_all', 0, 2)
plot_kernel_cmpr_lig(a6_L192F_df, a6_L192F_AD_df, a6_L192F_BBR_df, 'L192F', sections[11], 11)
plot_kernel_cmpr_lig(a6_L195F_df, a6_L195F_AD_df, a6_L195F_BBR_df, 'L195F', sections[11], 11)
plot_kernel_cmpr_lig(a6_E276F_df, a6_E276F_AD_df, a6_E276F_BBR_df, 'E276F', sections[11], 11)
plot_kernel_cmpr_lig(a6_V287T_df, a6_V287T_AD_df, a6_V287T_BBR_df, 'V287T', sections[11], 11)
#Just CYS215
cys_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_CYS_apo})
cys_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_CYS_1sug})
cys_L192F_df = pd.DataFrame({'L192F': rmsd_CYS_L192F})
cys_L195F_df = pd.DataFrame({'L195F': rmsd_CYS_L195F})
cys_F280Y_df = pd.DataFrame({'F280Y': rmsd_CYS_F280Y})
cys_E276F_df = pd.DataFrame({'E276F': rmsd_CYS_E276F})
cys_F196A_df = pd.DataFrame({'F196A': rmsd_CYS_F196A})
cys_V287T_df = pd.DataFrame({'V287T': rmsd_CYS_V287T})
df = pd.concat([cys_Apo_open_df, cys_Apo_close_df, cys_L192F_df, cys_E276F_df, cys_V287T_df, cys_F196A_df, cys_F280Y_df, cys_L195F_df])
plot_kernel_mut(df, 'CYS215', 'cys_all', 0, 1)
df = pd.concat([cys_L192F_df, cys_E276F_df, cys_V287T_df, cys_F196A_df, cys_F280Y_df, cys_L195F_df])
plot_kernel_mut(df, 'CYS215', 'cys_mut_all', 0, 1)
rmsd_cys = [rmsd_CYS_1sug, rmsd_CYS_apo, rmsd_CYS_F196A]
ax = plt.figure(figsize=(12, 6), frameon=False) # no visible frame
sns.kdeplot(data = rmsd_cys, fill=True, alpha=0.5)
plt.title('CYS215 RMSD Compared to WT Closed')
plt.savefig('mutate_RMSD_cys_F196A.png')
plt.close()
#BEG loop (L1)
beg_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_beg_apo})
beg_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_beg_1sug})
beg_L192F_df = pd.DataFrame({'L192F': rmsd_beg_L192F})
beg_L195F_df = pd.DataFrame({'L195F': rmsd_beg_L195F})
beg_F280Y_df = pd.DataFrame({'F280Y': rmsd_beg_F280Y})
beg_E276F_df = pd.DataFrame({'E276F': rmsd_beg_E276F})
beg_F196A_df = pd.DataFrame({'F196A': rmsd_beg_F196A})
beg_V287T_df = pd.DataFrame({'V287T': rmsd_beg_V287T})
df = pd.concat([beg_Apo_open_df, beg_Apo_close_df, beg_L192F_df, beg_E276F_df, beg_V287T_df, beg_F196A_df, beg_F280Y_df, beg_L195F_df])
plot_kernel_mut(df, 'L1', 'beg_all', 0, 4)
df = pd.concat([beg_L192F_df, beg_E276F_df, beg_V287T_df, beg_F196A_df, beg_F280Y_df, beg_L195F_df])
plot_kernel_mut(df, 'L1', 'beg_mut_all', 0, 4)
#Determine p-values for each of the sections of focus
file_p = open('p_values_mut.txt', 'w')
p = np.zeros((5, 7))
st, p[0,0] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_L192F, equal_var = False) #Welch's t-test
st, p[0,1] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_E276F, equal_var = False) #Welch's t-test
st, p[0,2] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_F280Y, equal_var = False) #Welch's t-test
st, p[0,3] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_L195F, equal_var = False) #Welch's t-test
st, p[0,4] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_F196A, equal_var = False) #Welch's t-test
st, p[0,5] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_V287T, equal_var = False) #Welch's t-test
st, p[0,6] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_1sug, equal_var = False) #Welch's t-test
st, p[1,0] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_L192F, equal_var = False) #Welch's t-test
st, p[1,1] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_E276F, equal_var = False) #Welch's t-test
st, p[1,2] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_F280Y, equal_var = False) #Welch's t-test
st, p[1,3] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_L195F, equal_var = False) #Welch's t-test
st, p[1,4] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_F196A, equal_var = False) #Welch's t-test
st, p[1,5] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_V287T, equal_var = False) #Welch's t-test
st, p[1,6] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_1sug, equal_var = False) #Welch's t-test
st, p[2,0] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_L192F, equal_var = False) #Welch's t-test
st, p[2,1] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_E276F, equal_var = False) #Welch's t-test
st, p[2,2] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_F280Y, equal_var = False) #Welch's t-test
st, p[2,3] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_L195F, equal_var = False) #Welch's t-test
st, p[2,4] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_F196A, equal_var = False) #Welch's t-test
st, p[2,5] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_V287T, equal_var = False) #Welch's t-test
st, p[2,6] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_1sug, equal_var = False) #Welch's t-test
st, p[3,0] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_L192F, equal_var = False) #Welch's t-test
st, p[3,1] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_E276F, equal_var = False) #Welch's t-test
st, p[3,2] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_F280Y, equal_var = False) #Welch's t-test
st, p[3,3] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_L195F, equal_var = False) #Welch's t-test
st, p[3,4] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_F196A, equal_var = False) #Welch's t-test
st, p[3,5] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_V287T, equal_var = False) #Welch's t-test
st, p[3,6] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_1sug, equal_var = False) #Welch's t-test
st, p[4,0] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_L192F, equal_var = False) #Welch's t-test
st, p[4,1] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_E276F, equal_var = False) #Welch's t-test
st, p[4,2] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_F280Y, equal_var = False) #Welch's t-test
st, p[4,3] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_L195F, equal_var = False) #Welch's t-test
st, p[4,4] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_F196A, equal_var = False) #Welch's t-test
st, p[4,5] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_V287T, equal_var = False) #Welch's t-test
st, p[4,6] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_1sug, equal_var = False) #Welch's t-test
#p rows: 0 = a3_top, 1 = a3, 2 = a4, 3 = a6_bot, 4 = CYS215
sections_mini = ['a3_top', 'a3', 'a4', 'a6_bot', 'CYS215']
Labels_mut = ['L192F', 'E276F', 'F280Y', 'L195F', 'F196A', 'V287T', 'Apo Closed']
file_p.write('P values of RMSD with Apo closed reference structure Relative to Apo Open \n')
for i in range(len(sections_mini)):
file_p.write(str(sections_mini[i]) + '\n')
for j in range(len(Labels_mut)):
file_p.write(Labels_mut[j] + ': ' + str(p[i,j]) + '\n')
p = np.zeros((5, 12))
st, p[0,0] = stats.ttest_ind(rmsd_a3_top_L192F, rmsd_a3_top_L192F_AD, equal_var = False) #Welch's t-test
st, p[0,1] = stats.ttest_ind(rmsd_a3_top_E276F, rmsd_a3_top_E276F_AD, equal_var = False) #Welch's t-test
st, p[0,2] = stats.ttest_ind(rmsd_a3_top_F280Y, rmsd_a3_top_F280Y_AD, equal_var = False) #Welch's t-test
st, p[0,3] = stats.ttest_ind(rmsd_a3_top_L195F, rmsd_a3_top_L195F_AD, equal_var = False) #Welch's t-test
st, p[0,4] = stats.ttest_ind(rmsd_a3_top_F196A, rmsd_a3_top_F196A_AD, equal_var = False) #Welch's t-test
st, p[0,5] = stats.ttest_ind(rmsd_a3_top_V287T, rmsd_a3_top_V287T_AD, equal_var = False) #Welch's t-test
st, p[0,6] = stats.ttest_ind(rmsd_a3_top_L192F, rmsd_a3_top_L192F_BBR, equal_var = False) #Welch's t-test
st, p[0,7] = stats.ttest_ind(rmsd_a3_top_E276F, rmsd_a3_top_E276F_BBR, equal_var = False) #Welch's t-test
st, p[0,8] = stats.ttest_ind(rmsd_a3_top_F280Y, rmsd_a3_top_F280Y_BBR, equal_var = False) #Welch's t-test
st, p[0,9] = stats.ttest_ind(rmsd_a3_top_L195F, rmsd_a3_top_L195F_BBR, equal_var = False) #Welch's t-test
st, p[0,10] = stats.ttest_ind(rmsd_a3_top_F196A, rmsd_a3_top_F196A_BBR, equal_var = False) #Welch's t-test
st, p[0,11] = stats.ttest_ind(rmsd_a3_top_V287T, rmsd_a3_top_V287T_BBR, equal_var = False) #Welch's t-test
st, p[1,0] = stats.ttest_ind(rmsd_a3_L192F, rmsd_a3_L192F_AD, equal_var = False) #Welch's t-test
st, p[1,1] = stats.ttest_ind(rmsd_a3_E276F, rmsd_a3_E276F_AD, equal_var = False) #Welch's t-test
st, p[1,2] = stats.ttest_ind(rmsd_a3_F280Y, rmsd_a3_F280Y_AD, equal_var = False) #Welch's t-test
st, p[1,3] = stats.ttest_ind(rmsd_a3_L195F, rmsd_a3_L195F_AD, equal_var = False) #Welch's t-test
st, p[1,4] = stats.ttest_ind(rmsd_a3_F196A, rmsd_a3_F196A_AD, equal_var = False) #Welch's t-test
st, p[1,5] = stats.ttest_ind(rmsd_a3_V287T, rmsd_a3_V287T_AD, equal_var = False) #Welch's t-test
st, p[1,6] = stats.ttest_ind(rmsd_a3_L192F, rmsd_a3_L192F_BBR, equal_var = False) #Welch's t-test
st, p[1,7] = stats.ttest_ind(rmsd_a3_E276F, rmsd_a3_E276F_BBR, equal_var = False) #Welch's t-test
st, p[1,8] = stats.ttest_ind(rmsd_a3_F280Y, rmsd_a3_F280Y_BBR, equal_var = False) #Welch's t-test
st, p[1,9] = stats.ttest_ind(rmsd_a3_L195F, rmsd_a3_L195F_BBR, equal_var = False) #Welch's t-test
st, p[1,10] = stats.ttest_ind(rmsd_a3_F196A, rmsd_a3_F196A_BBR, equal_var = False) #Welch's t-test
st, p[1,11] = stats.ttest_ind(rmsd_a3_V287T, rmsd_a3_V287T_BBR, equal_var = False) #Welch's t-test
st, p[2,0] = stats.ttest_ind(rmsd_a4_L192F, rmsd_a4_L192F_AD, equal_var = False) #Welch's t-test
st, p[2,1] = stats.ttest_ind(rmsd_a4_E276F, rmsd_a4_E276F_AD, equal_var = False) #Welch's t-test
st, p[2,2] = stats.ttest_ind(rmsd_a4_F280Y, rmsd_a4_F280Y_AD, equal_var = False) #Welch's t-test
st, p[2,3] = stats.ttest_ind(rmsd_a4_L195F, rmsd_a4_L195F_AD, equal_var = False) #Welch's t-test
st, p[2,4] = stats.ttest_ind(rmsd_a4_F196A, rmsd_a4_F196A_AD, equal_var = False) #Welch's t-test
st, p[2,5] = stats.ttest_ind(rmsd_a4_V287T, rmsd_a4_V287T_AD, equal_var = False) #Welch's t-test
st, p[2,6] = stats.ttest_ind(rmsd_a4_L192F, rmsd_a4_L192F_BBR, equal_var = False) #Welch's t-test
st, p[2,7] = stats.ttest_ind(rmsd_a4_E276F, rmsd_a4_E276F_BBR, equal_var = False) #Welch's t-test
st, p[2,8] = stats.ttest_ind(rmsd_a4_F280Y, rmsd_a4_F280Y_BBR, equal_var = False) #Welch's t-test
st, p[2,9] = stats.ttest_ind(rmsd_a4_L195F, rmsd_a4_L195F_BBR, equal_var = False) #Welch's t-test
st, p[2,10] = stats.ttest_ind(rmsd_a4_F196A, rmsd_a4_F196A_BBR, equal_var = False) #Welch's t-test
st, p[2,11] = stats.ttest_ind(rmsd_a4_V287T, rmsd_a4_V287T_BBR, equal_var = False) #Welch's t-test
sections_mini = ['a3_top', 'a3', 'a4']
Labels_mut = ['L192F', 'E276F', 'F280Y', 'L195F', 'F196A', 'V287T']
file_p.write('P values of RMSD with Apo closed reference structure Relative to Apo Mut \n')
for i in range(len(sections_mini)):
file_p.write(str(sections_mini[i]) + '\n')
for j in range(len(Labels_mut)):
n = j+6
file_p.write(Labels_mut[j] + ' AD: ' + str(p[i,j]) + '\n')
file_p.write(Labels_mut[j] + ' BBR: ' + str(p[i,n]) + '\n')
|
ajfriedman22/PTP1B
|
compare_mutant_scripts/rmsd_mut_compare.py
|
rmsd_mut_compare.py
|
py
| 30,484 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6056862050
|
from datetime import datetime
import csv
def logReceivedGossip(file,gossipID,spreader,audience,awardedSP,targetCitizensSP,receivingAudienceKnownRumours,citizen_list,rumourTarget,sentiment):
now = datetime.now()
date_time = now.strftime("%m/%d/%Y %H:%M:%S:%f")
	# get total rumour count across all citizens
	kt = sum(len(x['knownRumours']) for x in citizen_list.values() if x)
#'time,key,id,spreader,audience,sp,originalsp,audienceKnownRumours,totalRumours,'
with open(file, 'a', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([str(date_time),str(gossipID),spreader,audience,str(awardedSP),str(targetCitizensSP),str(len(receivingAudienceKnownRumours)),rumourTarget,sentiment,kt])
def logUpdateMessage(message,file,action='a'):
f = open(file, action)
f.write(message)
f.close()
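# Minimal usage sketch: the citizen_list layout below is an assumption inferred
# from the sum(len(x['knownRumours']) ...) expression above; the file names, ids
# and sentiment value are purely illustrative.
if __name__ == '__main__':
    demo_citizens = {'alice': {'knownRumours': ['r1', 'r2']}, 'bob': {'knownRumours': ['r1']}}
    logReceivedGossip('gossip_log.csv', 'r1', 'alice', 'bob', 3, 10, ['r1'], demo_citizens, 'carol', 'negative')
    logUpdateMessage('demo run complete\n', 'gossip_update_log.txt')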
|
murchie85/gossipSimulator
|
game/functions/logging.py
|
logging.py
|
py
| 822 |
python
|
en
|
code
| 25 |
github-code
|
6
|
70835728188
|
import json
# Load the per-user score report and the case template (Windows-specific paths).
fs = open("G:\python\Analysis"+"\\"+'score.json', encoding='utf-8')
ft = open("G:\python\Analysis"+"\\"+'template.json', encoding='utf-8')
res1 = fs.read()
data = json.loads(res1)
res2 = ft.read()
template = json.loads(res2)
fs.close()
ft.close()
templateKey = template.keys()
goal = {}
# score.json maps each user id to {'cases': [{'case_id': ..., 'score': ...}, ...]};
# template.json lists the full set of case ids.
for key in data:
    user_id = str(key)
    cases = data[key]['cases']
    # Collect the ids of the cases this user solved with a perfect score.
    cid = []
    res = []
    for case in cases:
        if case["score"] == 100:
            cid.append(case['case_id'])
    # Build a 0/1 flag vector over the template cases.
    for i in templateKey:
        if i in cid:
            res.append(1)
        else:
            res.append(0)
    goal[user_id] = res
# Write the resulting flag matrix back to disk.
json_str = json.dumps(goal, indent=4, ensure_ascii=False)
with open("G:\python\Analysis"+"\\"+"flag.json", 'w', encoding='utf-8') as json_file:
    json_file.write(json_str)
|
nju161250023/Analysis
|
createFlag.py
|
createFlag.py
|
py
| 803 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27196052204
|
import Node
def readFile(filePath, heuristic=None):
if heuristic is None:
heuristic = {}
with open("./data/" + filePath, 'r') as f:
data = f.read().splitlines()
initialState, goalState = None, [None]
graph = dict()
count = 0
for line in data:
if line == '' or line == '#':
continue
elif count == 0:
initialState = line
count += 1
continue
elif count == 1:
goalState = line.split()
count += 1
continue
else:
transitions(line, graph)
graph = initializeGraph(graph, heuristic)
return graph, initialState, goalState
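# Expected graph-file layout, inferred from the parsing above (the actual course
# data files are an assumption and may differ slightly):
#   <initial state>
#   <goal state 1> <goal state 2> ...
#   <City>: <Neighbour1>,<cost1> <Neighbour2>,<cost2> ...
# Empty lines and lines consisting of a single '#' are skipped.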
def readHeuristic(filePath):
with open("./data/" + filePath, 'r') as f:
data = f.read().splitlines()
heuristic = dict()
for line in data:
city, heuristicValue = line.split(" ")
city = city[:-1]
heuristic[city] = int(heuristicValue)
return heuristic
def transitions(line, graph):
city = line.split()[0][:-1]
neighbours = [tuple(neighbour.split(',')) for neighbour in line.split()[1:]]
node = Node.Node(city, {})
graph[city] = (node, neighbours)
def initializeGraph(graph, heuristic):
for x, (node, edges) in graph.items():
for edge in edges:
neighbor_name, weight = edge
node.edges[graph[neighbor_name][0]] = int(weight)
if heuristic:
node.heuristic = heuristic[node.city]
return {k: v[0] for k, v in graph.items()}
def setNodes(initialState, goalState, graph):
initialNode, goalNodes = None, []
if initialState in graph:
initialNode = graph[initialState]
for states in goalState:
if states in graph:
goalNodes.append(graph[states])
return initialNode, goalNodes
def calculateTotalCost(copy):
totalCost = 0
parent1 = copy.parent
while copy.parent:
parent1 = copy.parent
for key, value in parent1.edges.items():
if key.city == copy.city:
totalCost += value
copy = copy.parent
return totalCost
|
EdiProdan/FER
|
Introduction to Artificial Intelligence/laboratory_exercise_1/utils.py
|
utils.py
|
py
| 2,163 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33062234730
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*
import requests
class MetrikaAPI(object):
def __init__(self, counter_id, token, host='https://api-metrika.yandex.ru'):
self.counter_id = counter_id
self.token = token
self.host = host
def _get_url(self, url='/stat/v1/data', params=None, data=None, method='GET'):
req = requests.request(
method=method,
url=self.host + url,
params=params,
data=data,
headers={'Authorization': 'OAuth ' + self.token},
)
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
print(req.content)
raise
except Exception:
print("Unexpected exception")
raise
return req
def get_sources_visits(self):
req = self._get_url(params=dict(
metrics='ym:s:visits',
id=self.counter_id,
))
return req.json()
def get_sources_users(self):
req = self._get_url(params=dict(
metrics='ym:s:users',
id=self.counter_id,
))
return req.json()
def get_sources_pageviews(self):
req = self._get_url(params=dict(
metrics='ym:s:pageviews',
id=self.counter_id,
))
return req.json()
def main():
    d = MetrikaAPI(44138734, 'your OAuth token would go here')
    vis = d.get_sources_visits()
    us = d.get_sources_users()
    view = d.get_sources_pageviews()
    print('Total visits: {}'.format(vis['data'][0]['metrics']))
    print('Total visitors: {}'.format(us['data'][0]['metrics']))
    print('Total pageviews: {}'.format(view['data'][0]['metrics']))
if __name__ == '__main__':
main()
|
swetlanka/py3
|
3-5/3-5.py
|
3-5.py
|
py
| 1,824 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74281365627
|
from __future__ import annotations
import re
from dataclasses import asdict, dataclass
from typing import Optional, Sized, TypeVar
import torch
import torchinfo
from accelerate.utils.random import set_seed
from torch.utils.data import DataLoader
from simpletrainer.utils.common import pretty_repr
T = TypeVar('T')
set_seed = set_seed
@dataclass
class DataInfo:
batch_size: int
batch_size_per_device: int
num_sampels: Optional[int]
num_batches_per_epoch: Optional[int]
def __repr__(self) -> str:
return pretty_repr(asdict(self), self.__class__.__name__)
def get_batch_size_from_dataloader(dataloader: DataLoader) -> int:
if dataloader.batch_size is None:
try:
return dataloader.batch_sampler.batch_size # type: ignore
except AttributeError:
raise ValueError(
'Can not get batch size from dataloader, does not support `BatchSampler` with varying batch size yet.'
)
else:
return dataloader.batch_size
def get_num_samples_from_dataloader(dataloader: DataLoader) -> Optional[int]:
if isinstance(dataloader.dataset, Sized):
return len(dataloader.dataset)
elif isinstance(dataloader.sampler, Sized):
return len(dataloader.sampler)
else:
sampler = getattr(dataloader.batch_sampler, 'sampler')
if isinstance(sampler, Sized):
return len(sampler)
else:
return
def get_data_info(dataloader: DataLoader, world_size: int = 1) -> DataInfo:
num_samples = get_num_samples_from_dataloader(dataloader)
try:
num_batches_per_epoch = len(dataloader)
except:
num_batches_per_epoch = None
batch_size_per_device = get_batch_size_from_dataloader(dataloader)
batch_size = batch_size_per_device * world_size
return DataInfo(
batch_size=batch_size,
batch_size_per_device=batch_size_per_device,
num_sampels=num_samples,
num_batches_per_epoch=num_batches_per_epoch,
)
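# Minimal usage sketch: a toy DataLoader to show what get_data_info derives.
# _data_info_demo is a hypothetical helper; the tensor sizes, batch size and
# world_size are illustrative assumptions.
def _data_info_demo() -> DataInfo:
    from torch.utils.data import TensorDataset
    dataset = TensorDataset(torch.randn(100, 8), torch.randint(0, 2, (100,)))
    loader = DataLoader(dataset, batch_size=16)
    # -> batch_size=32, batch_size_per_device=16, num_sampels=100, num_batches_per_epoch=7
    return get_data_info(loader, world_size=2)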
def get_model_info(
model: torch.nn.Module,
input_data: Optional[torchinfo.torchinfo.INPUT_DATA_TYPE] = None,
device: Optional[torch.device] = None,
) -> torchinfo.ModelStatistics:
try:
model_statistics = torchinfo.summary(model, input_data=input_data, verbose=0, device=device)
except Exception:
model_statistics = torchinfo.summary(model, verbose=0, device=device)
return model_statistics
def get_parameter_id_group_map(
optimizer: torch.optim.Optimizer,
) -> dict[int, str]:
parameter_id_group_map = {}
for group, params in enumerate(optimizer.param_groups):
for param in params['params']:
parameter_id_group_map[id(param)] = str(group)
return parameter_id_group_map
def get_params_with_pattern(model: torch.nn.Module, pattern: re.Pattern):
params = []
for name, param in model.named_parameters():
if pattern.search(name):
params.append(param)
return params
def get_module_learning_rate_summary(module: torch.nn.Module, optimizer: torch.optim.Optimizer):
lr_dict: dict[str, float] = {}
names = {param: name for name, param in module.named_parameters()}
for group in optimizer.param_groups:
if 'lr' not in group:
continue
lr = group['lr']
for param in group['params']:
if param.requires_grad:
lr_dict[names[param]] = lr
else:
lr_dict[names[param]] = 0.0
return lr_dict
def get_module_parameter_summary(model: torch.nn.Module):
parameter_mean: dict[str, float] = {}
parameter_std: dict[str, float] = {}
for name, param in model.named_parameters():
if param.data.numel() > 0:
parameter_mean[name] = float(param.data.mean().item())
if param.data.numel() > 1:
parameter_std[name] = float(param.data.std().item())
return parameter_mean, parameter_std
def get_module_gradient_summary(model: torch.nn.Module):
gradient_mean: dict[str, float] = {}
gradient_std: dict[str, float] = {}
for name, param in model.named_parameters():
if param.grad is not None:
if param.grad.is_sparse:
grad_data = param.grad.data._values()
else:
grad_data = param.grad.data
# skip empty gradients
if torch.prod(torch.tensor(grad_data.shape)).item() > 0:
gradient_mean[name] = float(grad_data.mean().item())
if grad_data.numel() > 1:
gradient_std[name] = float(grad_data.std().item())
return gradient_mean, gradient_std
|
Moka-AI/simpletrainer
|
simpletrainer/utils/torch.py
|
torch.py
|
py
| 4,653 |
python
|
en
|
code
| 3 |
github-code
|
6
|
33229412854
|
# Implement the first move model for the Lego robot.
# 02_a_filter_motor
# Claus Brenner, 31 OCT 2012
from math import sin, cos, pi
from pylab import *
from lego_robot import *
# This function takes the old (x, y, heading) pose and the motor ticks
# (ticks_left, ticks_right) and returns the new (x, y, heading).
def filter_step(old_pose, motor_ticks, ticks_to_mm, robot_width):
l = ticks_to_mm * motor_ticks[0]
r = ticks_to_mm * motor_ticks[1]
# Find out if there is a turn at all.
if motor_ticks[0] == motor_ticks[1]:
# No turn. Just drive straight.
theta = old_pose[2];
x = old_pose[0] + (l * cos(theta));
y = old_pose[1] + (l * sin(theta));
else:
# Turn. Compute alpha, R, etc.
alpha = (r - l)/robot_width;
R = l/alpha;
cx = old_pose[0] - (R + robot_width/2)*sin(old_pose[2]);
cy = old_pose[1] + (R + robot_width/2)*cos(old_pose[2]);
theta = (old_pose[2] + alpha)%(2*pi)
x = cx + (R + robot_width/2)*sin(theta);
y = cy - (R + robot_width/2)*cos(theta);
return (x, y, theta)
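# Quick sanity check with made-up tick values: equal left/right ticks drive
# straight, e.g. filter_step((0.0, 0.0, 0.0), (100, 100), 0.349, 150.0) gives
# (34.9, 0.0, 0.0); unequal ticks follow an arc around the centre (cx, cy) above.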
if __name__ == '__main__':
# Empirically derived conversion from ticks to mm.
ticks_to_mm = 0.349
# Measured width of the robot (wheel gauge), in mm.
robot_width = 150.0
# Read data.
logfile = LegoLogfile()
logfile.read("robot4_motors.txt")
# Start at origin (0,0), looking along x axis (alpha = 0).
pose = (0.0, 0.0, 0.0)
# Loop over all motor tick records generate filtered position list.
filtered = []
for ticks in logfile.motor_ticks:
pose = filter_step(pose, ticks, ticks_to_mm, robot_width)
filtered.append(pose)
# Draw result.
for pose in filtered:
print(pose)
plot([p[0] for p in filtered], [p[1] for p in filtered], 'bo')
show()
|
jfrascon/SLAM_AND_PATH_PLANNING_ALGORITHMS
|
01-GETTING_STARTED/CODE/slam_02_a_filter_motor_question.py
|
slam_02_a_filter_motor_question.py
|
py
| 1,893 |
python
|
en
|
code
| 129 |
github-code
|
6
|
5569399042
|
"""Display image captured from image sensor"""
import numpy as np
import cv2
import socket
import tkinter
import pandas as pd
import datetime
import time
import os
class ImageGUI(object):
def __init__(self):
#self.buffer_size = 128 * 128 * 3 # picture size
self.buffer_size = (16384 * 2 + 2048 * 2) # picture size
self.img_buf_index = 0
self.img_buf_size = 3
self.img_buf = np.array([[[0] * 128] * 128] * self.img_buf_size)
self.array_buf = np.array([[0]*(128*128)]*3)
self.array_out_buf = np.array([[0]*(4*8*64)]*2)
self.array_out_shape = np.array([[[0] * 64] * 32] * 2)
self.array_pod_out = np.array([[0] * 64] * 32)
# udp must send bytes object
self.enquire_command = bytes([int('0x55', 16), 1]) # 0x55, 0x01
self.start_command = bytes([int('0x55', 16), 2])
self.stop_command = bytes([int('0x55', 16), 3])
self.stop_reply = bytes([int('0xaa', 16), int('0xf3', 16)])
        # Create the main window that holds the other widgets
        self.root = tkinter.Tk()
        # Set the main window title
        self.root.title("University of Macau AMSV Image Sensor Control")
        self.root.geometry('500x300')
        # Create an entry box for the target IP address
        self.input_ip = tkinter.Entry(self.root,width=50)
        # Create a listbox used to echo status messages
        self.display_info = tkinter.Listbox(self.root, width=50)
        # Create the control buttons
        #self.result_button = tkinter.Button(self.root, command = self.find_position, text = "Query")
        self.connect_button = tkinter.Button(self.root, command = self.connect_fun, text = "Connect")
        self.image_start_button = tkinter.Button(self.root, command = self.trans_start, text = "Start")
        self.image_stop_button = tkinter.Button(self.root, command = self.image_save_stop, text = "Save")
    # Lay out the widgets
def gui_arrang(self):
self.input_ip.pack()
self.connect_button.place(x=100,y=220,height=50,width=100)
self.image_start_button.place(x=200,y=220,height=50,width=100)
self.image_stop_button.place(x=300,y=220,height=50,width=100)
self.display_info.pack()
def connect_fun(self, print_en = 1):
self.ip_addr = self.input_ip.get()
self.udp_server_ip_addr = self.ip_addr # target IP address
self.udp_port = 7 # port
self.soc=socket.socket()
self.soc.connect((self.udp_server_ip_addr, self.udp_port))
if(print_en == 1):
self.display_info.insert(0,"Connect successfully")
self.soc.close()
def trans_start(self): # This function will be executed when 'Start' button is clicked
        ## Push a status message onto the echo listbox
#self.display_info.insert(0,input_str)
#end=self.soc.send(self.start_command) # send 'start' command
while True:
t1_init = time.perf_counter()
self.connect_fun(0)
#for mean_cnt in range(10):
#buf_index = 0
#print('Tcp send')
self.soc.send(self.start_command) # send 'start' command
int_dat = []
cmd_dat = []
cmd_rec = []
############## receive data and cmd ###############
#print('Tcp receive')
cmd_rec = self.soc.recv(self.buffer_size)
for i in cmd_rec[:]: # transform bytes into int
cmd_dat.append(int(i))
if (int(cmd_dat[0]) == int('0x55', 16)) and (int(cmd_dat[1]) == int('0', 16)):
int_dat = cmd_dat[2:]
total_len = len(int_dat)
#print('Tcp receive num:', total_len)
while total_len < (16384 * 2 + 2048 * 2):
#if total_len > 36000:
# break
tcp_dat = self.soc.recv(self.buffer_size) # receive data again
for i in tcp_dat[:]: # transform bytes into int
int_dat.append(int(i))
total_len = len(int_dat)
#print('Tcp receive num:', total_len)
#if total_len < (16384 * 2 + 2048 * 2):
# print('TCP data lost! Receive Num:', total_len)
# self.soc.close()
# self.connect_fun()
# continue
                self.array_buf[0][:] = np.array(int_dat[0:16384]) # Pod data before exposure
                self.array_buf[1][:] = np.array(int_dat[16384:32768])
                array_out_temp1 = np.array(int_dat[32768: (32768 + 2048)])
                array_out_temp2 = np.array(int_dat[(32768 + 2048) : (32768 + 4096)]) # Pod data after exposure
                # change the 8bit array_out_buf data into 64 bit
                array_shape_temp1 = array_out_temp1.reshape((32, 64), order = 'F')
                array_shape_temp2 = array_out_temp2.reshape((32, 64), order = 'F')
                self.array_out_shape[0] = array_shape_temp1 # OUTR OUTL OUTU OUTD data output by the FPGA
                # self.array_out_shape[1] = array_shape_temp2 # OUTR OUTL OUTU OUTD data output by the chip
                self.array_pod_out = array_shape_temp2 # Pod data corresponding to the chip's OUT
self.array_buf[2] = self.array_buf[0] - self.array_buf[1]
self.img_buf[0] = self.tcp_data2mat(self.array_buf[2]) # reform bytes data into picture structure
img = np.mat(self.img_buf[0].astype(np.uint8)) # transform img_data into uint8 matrix
x, y = img.shape[0:2]
img_test1 = cv2.resize(img, (int(y * 6), int(x * 6))) # picture reshape (scaling)
#print('Open-CV show picture')
cv2.imshow('frame', img_test1)
if cv2.waitKey(1) & 0xFF == ord('c'):
#self.buf_img = img
#self.buf_tcp_dat = before_array
return 0
else:
print('Frame lost! ERROR_code:' + str(cmd_dat[:2]))
continue
self.soc.close()
frame_rate = 1/(time.perf_counter() - t1_init)
print('Frame Rate:%5.3f' % frame_rate, '/s')
def tcp_data2mat(self, int_dat):
#temp = np.array(int_dat)
#self.img_buf_index = (self.img_buf_index + 1) % self.img_buf_size
t1 = int_dat.reshape(-1, 16)
t2 = int_dat.reshape(-1, 16).T
t3 = t2[0][:].reshape(64,4,2,2)
pic = np.array([[0]*128]*128) # generate a 128x128 zero array
for i in range(16):
for j in range(64):
for k in range(4):
pic[2*j ][8*i+2*k ] = t2[i][0+4*k+16*j]
pic[2*j ][8*i+2*k+1] = t2[i][1+4*k+16*j]
pic[2*j+1][8*i+2*k+1] = t2[i][2+4*k+16*j]
pic[2*j+1][8*i+2*k ] = t2[i][3+4*k+16*j]
return pic
def image_save_stop(self):
# stop transfer
self.connect_fun(0)
end=self.soc.send(self.stop_command)
image_dat = self.soc.recv(10)
# create folder
folder_name = "./Pic_data/" + time.strftime("%Y%m%d%H%M%S")
dir_exist = os.path.isdir(folder_name)
if not dir_exist:
os.makedirs(folder_name)
#time_info = time.strftime("%Y%m%d%H%M%S")
# save data
save = pd.DataFrame(self.img_buf[0])
save.to_csv(folder_name + '/img_data.csv')
save = pd.DataFrame(self.array_buf[0])
save.to_csv(folder_name + '/before_exposure.csv')
save = pd.DataFrame(self.array_buf[1])
save.to_csv(folder_name + '/after_exposure.csv')
save = pd.DataFrame(self.array_buf[2])
save.to_csv(folder_name + '/sub_data.csv')
#save = pd.DataFrame(self.array_out_buf[0])
#save.to_csv(folder_name + '/out_chip_data.csv')
save = pd.DataFrame(self.array_out_shape[0])
save.to_csv(folder_name + '/out_fpga_data.csv')
#save = pd.DataFrame(self.array_out_shape[1])
#save.to_csv(folder_name + '/out_chip_data.csv')
save = pd.DataFrame(self.array_pod_out)
save.to_csv(folder_name + '/out_pod_data.csv')
#save = pd.DataFrame(self.array_out_buf[1])
#save.to_csv(folder_name + '/out_fpga_data.csv')
if(image_dat == self.stop_reply):
self.display_info.insert(0,'Stop and Save successfully!')
def image_show(self):
# Image show
cap = cv2.VideoCapture(0)
cap.open(0)
while True:
ret, frame = cap.read()
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', gray)
if cv2.waitKey(1) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def main():
    # Create the GUI object
    FL = ImageGUI()
    # Lay out the widgets
    FL.gui_arrang()
    # Run the Tk main loop
tkinter.mainloop()
pass
if __name__ == "__main__":
main()
########### UDP client Transfer #########
#from socket import *
#HOST = '192.168.1.10'
#PORT = 8080
#BUFSIZ = 1024
#ADDRESS = (HOST, PORT)
#udpClientSocket = socket(AF_INET, SOCK_DGRAM)
#
#while True:
# data = bytes([int('0xFE', 16), 0,2,0,1])
# if not data:
# break
#
# # Send the data
# udpClientSocket.sendto(data, ADDRESS)
# # Receive the data
# data, ADDR = udpClientSocket.recvfrom(BUFSIZ)
# if not data:
# break
# print("Server response:", data)
#
#udpClientSocket.close()
######## TCP Client Transfer #########
#client_sock = socket.socket()
#client_sock.connect(('192.168.1.10', 7))
## Send a connection message
#stop_command = bytes([int('0x55', 16), 3]) # udp must send bytes object
#client_sock.send(stop_command)
#while 1:
# recv_dat = client_sock.recv(1024)
# print(recv_dat)
# # Handle the console input
# aa = input("echo >>:")
# if aa == 'exit':
# break
# while not aa:
# aa = input("echo >>:")
# # The key part is the two statements above and below
# client_sock.send(aa.encode('utf-8'))
#client_sock.close()
|
yg99992/Image_transfer_open_source
|
python_code/Image_show.py
|
Image_show.py
|
py
| 10,237 |
python
|
en
|
code
| 6 |
github-code
|
6
|
28400031595
|
import os
import time
from datetime import datetime
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from torch.autograd import Variable
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import random
import numpy as np
import utils.config as config
import matplotlib.pyplot as plt
import os, psutil
import functools
from skimage.measure import label as sk_label
from skimage.measure import regionprops as sk_regions
from skimage.transform import resize
# let all of print can be flush = ture
print = functools.partial(print, flush=True)
#-------- Dataloder --------
# After augmnetation with resize, crop spleen area and than transofermer
class BoxCrop(object):
'''
Croping image by bounding box label after augmentation
input: keys=["image", "label"]
label:
[[x1,y1,x2,y2,z1,z2,class]...]
image:
[1,x,y,z]
output dictionary add
im_info: [x,y,z,scale_x_y,scale_z]
num_box: 1 (All is one in our data)
'''
def __init__(self,keys):
self.keys = keys
def __call__(self, data):
d = dict(data)
image = d['image']
label = d['label']
# only one label
if type(label) == type(np.array([])):
label_list = label.tolist()
else:
# more than one label
# select the first label
label_list = eval(label)[0]
if label_list[1]>=label_list[3] or label_list[0]>=label_list[2] or label_list[4]>=label_list[5]:
raise RuntimeError(f"{d['image_meta_dict']['filename_or_obj']} bounding box error")
#print(f"{d['image_meta_dict']['filename_or_obj']} bounding box error ")
out_image = image[0, int(label_list[1]):int(label_list[3]), int(label_list[0]):int(label_list[2]), int(label_list[4]):int(label_list[5])]
d['image'] = np.expand_dims(out_image,axis=0)
d['label'] = label_list[6]
#print(d['image'].shape)
return d
# Duplicate dataset entries num_samples times (for augmentation)
class Dulicated(object):
'''
    Duplicate data for augmentation
'''
def __init__(self,
keys,
num_samples: int = 1):
self.keys = keys
self.num_samples = num_samples
def __call__(self, data):
d = dict(data)
image = d['image']
label = d['label']
results: List[Dict[Hashable, np.ndarray]] = [dict(data) for _ in range(self.num_samples)]
for key in data.keys():
for i in range(self.num_samples):
results[i][key] = data[key]
return results
#return d
# True label
class Annotate(object):
'''
transform mask to bounding box label after augmentation
check the image shape to know scale_x_y, scale_z
input: keys=["image", "label"]
output dictionary add
im_info: [x,y,z,scale_x_y,scale_z]
num_box: 1 (All is one in our data)
'''
def __init__(self,keys):
self.keys = keys
def __call__(self, data):
d = dict(data)
#image = d[self.keys[0]]
#label = d[self.keys[1]]
image = d['image']
label = d['label']
label = label.squeeze(0)
annotations = np.zeros((1, 7))
annotation = mask2boundingbox(label)
if annotation == 0:
annotation = annotations
raise ValueError('Dataloader data no annotations')
#print("Dataloader data no annotations")
else:
# add class label
cls = d['class']
annotation = np.array(annotation)
annotation = np.append(annotation, cls)
#annotation = np.expand_dims(annotation,0)
#print(annotation.shape)
#print(image.shape)
d['label'] = annotation
return d
def mask2boundingbox(label):
if torch.is_tensor(label):
label = label.numpy()
sk_mask = sk_label(label)
regions = sk_regions(label.astype(np.uint8))
#global top, left, low, bottom, right, height
#print(regions)
# check regions is empty
if not regions:
return 0
for region in regions:
# print('[INFO]bbox: ', region.bbox)
# region.bbox (x1,y1,z1,x2,y2,z2)
# top, left, low, bottom, right, height = region.bbox
y1, x1, z1, y2, x2, z2 = region.bbox
# return left, top, right, bottom, low, height
return x1, y1, x2, y2, z1, z2
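# Small self-check of mask2boundingbox on a synthetic mask; the volume shape and
# blob position are arbitrary assumptions, and _mask2boundingbox_demo is a
# hypothetical helper used only for illustration.
def _mask2boundingbox_demo():
    mask = np.zeros((64, 64, 32), dtype=np.uint8)
    mask[10:20, 30:40, 5:15] = 1   # one rectangular blob
    # region.bbox is (y1, x1, z1, y2, x2, z2), so this returns (30, 10, 40, 20, 5, 15)
    return mask2boundingbox(mask)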
#-------- Running setting --------
'''
def adjust_learning_rate_by_step(optimizer, epoch, init_lr, decay_rate=.5 ,lr_decay_epoch=40):
#Sets the learning rate to initial LR decayed by e^(-0.1*epochs)
lr = init_lr * (decay_rate ** (epoch // lr_decay_epoch))
for param_group in optimizer.param_groups:
#param_group['lr'] = param_group['lr'] * math.exp(-decay_rate*epoch)
param_group['lr'] = lr
#lr = init_lr * (0.1**(epoch // lr_decay_epoch))
#print('LR is set to {}'.format(param_group['lr']))
return optimizer , lr
def adjust_learning_rate(optimizer, epoch, init_lr, decay_rate=.5):
#Sets the learning rate to initial LR decayed by e^(-0.1*epochs)
lr = init_lr * decay_rate
for param_group in optimizer.param_groups:
#param_group['lr'] = param_group['lr'] * math.exp(-decay_rate*epoch)
param_group['lr'] = lr
#lr = init_lr * (0.1**(epoch // lr_decay_epoch))
#print('LR is set to {}'.format(param_group['lr']))
return optimizer , lr
'''
def train(model, device, data_num, epochs, optimizer, loss_function, train_loader, valid_loader, early_stop, scheduler, check_path):
# Let ini config file can be writted
#global best_metric
#global best_metric_epoch
#val_interval = 2
best_metric = -1
best_metric_epoch = -1
trigger_times = 0
#epoch_loss_values = list()
writer = SummaryWriter()
for epoch in range(epochs):
print("-" * 10)
print(f"epoch {epoch + 1}/{epochs}")
# record ram memory used
process = psutil.Process(os.getpid())
print(f'RAM used:{process.memory_info().rss/ 1024 ** 3} GB')
model.train()
epoch_loss = 0
step = 0
for batch_data in train_loader:
step += 1
inputs, labels = batch_data['image'].to(device), batch_data['label'].long().to(device)
optimizer.zero_grad()
#inputs, labels = Variable(inputs), Variable(labels)
outputs = model(inputs)
#print(f'outputs:{outputs.size()}')
#print(f'labels:{labels.size()}')
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_len = data_num // train_loader.batch_size
print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
epoch_loss /= step
config.epoch_loss_values.append(epoch_loss)
print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
# Early stopping & save best weights by using validation
metric = validation(model, valid_loader, device)
scheduler.step(metric)
# checkpoint setting
if metric > best_metric:
# reset trigger_times
trigger_times = 0
best_metric = metric
best_metric_epoch = epoch + 1
torch.save(model.state_dict(), f"{check_path}/{best_metric}.pth")
print('trigger times:', trigger_times)
print("saved new best metric model")
else:
trigger_times += 1
print('trigger times:', trigger_times)
# Save last 3 epoch weight
if early_stop - trigger_times <= 3 or epochs - epoch <= 3:
torch.save(model.state_dict(), f"{check_path}/{metric}_last.pth")
print("save last metric model")
print(
"current epoch: {} current accuracy: {:.4f} best accuracy: {:.4f} at epoch {}".format(
epoch + 1, metric, best_metric, best_metric_epoch
)
)
writer.add_scalar("val_accuracy", metric, epoch + 1)
# early stop
if trigger_times >= early_stop:
print('Early stopping!\nStart to test process.')
print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
return model
print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
config.best_metric = best_metric
config.best_metric_epoch = best_metric_epoch
writer.close()
#print(f'training_torch best_metric:{best_metric}',flush =True)
#print(f'training_torch config.best_metric:{config.best_metric}',flush =True)
return model
class AngleLoss_predict(nn.Module):
def __init__(self, gamma=0):
super(AngleLoss_predict, self).__init__()
self.gamma = gamma
self.it = 1
self.LambdaMin = 5.0
self.LambdaMax = 1500.0
self.lamb = 1500.0
def forward(self, input, target):
cos_theta, phi_theta = input
target = target.view(-1, 1) # size=(B,1)
index = cos_theta.data * 0.0 # size=(B, Classnum)
# index = index.scatter(1, target.data.view(-1, 1).long(), 1)
#index = index.byte()
index = index.bool()
index = Variable(index)
# index = Variable(torch.randn(1,2)).byte()
self.lamb = max(self.LambdaMin, self.LambdaMax / (1 + 0.1 * self.it))
output = cos_theta * 1.0 # size=(B,Classnum)
output1 = output.clone()
# output1[index1] = output[index] - cos_theta[index] * (1.0 + 0) / (1 + self.lamb)
# output1[index1] = output[index] + phi_theta[index] * (1.0 + 0) / (1 + self.lamb)
output[index] = output1[index]- cos_theta[index] * (1.0 + 0) / (1 + self.lamb)+ phi_theta[index] * (1.0 + 0) / (1 + self.lamb)
return(output)
def validation(model, val_loader, device):
#metric_values = list()
model.eval()
with torch.no_grad():
num_correct = 0.0
metric_count = 0
for val_data in val_loader:
val_images, val_labels = val_data['image'].to(device), val_data['label'].to(device)
val_outputs = model(val_images)
# base on AngleLoss
if isinstance(val_outputs, tuple):
val_outputs = AngleLoss_predict()(val_outputs,val_labels)
value = torch.eq(val_outputs.argmax(dim=1), val_labels)
metric_count += len(value)
num_correct += value.sum().item()
metric = num_correct / metric_count
config.metric_values.append(metric)
#print(f'validation metric:{config.metric_values}',flush =True)
return metric
def plot_loss_metric(epoch_loss_values,metric_values,save_path):
plt.figure("train", (12, 6))
plt.subplot(1, 2, 1)
plt.title("Epoch Average Loss")
x = [i + 1 for i in range(len(epoch_loss_values))]
y = epoch_loss_values
plt.xlabel("epoch")
plt.plot(x, y)
plt.subplot(1, 2, 2)
plt.title("Val Accuracy")
x = [i + 1 for i in range(len(metric_values))]
y = metric_values
plt.xlabel("epoch")
plt.plot(x, y)
plt.savefig(f'{save_path}/train_loss_metric.png')
def kfold_split(file, kfold, seed, type, fold):
if type == 'pos':
d = {}
file_list = ['file']
file_list.extend([f'pos_split_df_{i}' for i in range(kfold)])
d['file'] = file
for i in range(kfold):
d[f'test_pos_df_{i}'] = d[file_list[i]].groupby(["gender","age_range","spleen_injury_class"],group_keys=False).apply(lambda x: x.sample(frac=1/(kfold-i),random_state=1))
d[f'pos_split_df_{i}'] = d[file_list[i]].drop(d[f'test_pos_df_{i}'].index.to_list())
output_file = d[f'test_pos_df_{fold}']
elif type == 'neg':
file_list = [f'neg_split_df_{i}' for i in range(kfold)]
file_list = np.array_split(file.sample(frac=1,random_state=seed), kfold)
output_file = file_list[fold]
return output_file
def Data_progressing(pos_file, neg_file, box_df, imbalance_data_ratio, data_split_ratio, seed, fold, save_file = False, cropping = True):
# Pos data progress
for index, row in pos_file.iterrows():
if row['OIS']==row['OIS']:
pos_file.loc[index,'spleen_injury_grade'] = row['OIS']
else:
pos_file.loc[index,'spleen_injury_grade'] = row['R_check']
new_col= 'age_range'
new_col_2 = 'spleen_injury_class'
bins = [0,30,100]
bins_2 = [0,2,5]
label_2 = ['OIS 1,2','OIS 3,4,5']
pos_file[new_col] = pd.cut(x=pos_file.age, bins=bins)
pos_file[new_col_2] = pd.cut(x=pos_file.spleen_injury_grade, bins=bins_2, labels=label_2)
# positive need select column and split in kfold
test_pos_df = kfold_split(pos_file, int(1/data_split_ratio[2]), seed, 'pos', fold)
train_pos_file = pos_file.drop(test_pos_df.index.to_list())
valid_pos_df = train_pos_file.groupby(['gender','age_range','spleen_injury_class'],group_keys=False).apply(lambda x: x.sample(frac=data_split_ratio[1]/(1-data_split_ratio[2]),random_state=seed))
train_pos_df = train_pos_file.drop(valid_pos_df.index.to_list())
# negative only need split in kfold
neg_sel_df = neg_file.sample(n=len(pos_file),random_state=seed)
test_neg_df = kfold_split(neg_sel_df, int(1/data_split_ratio[2]), seed, 'neg', fold)
train_neg_file = neg_file.drop(test_neg_df.index.to_list())
valid_neg_df = train_neg_file.sample(n=len(valid_pos_df),random_state=seed)
train_neg_df = train_neg_file.drop(valid_neg_df.index.to_list()).sample(n=len(train_pos_df)*imbalance_data_ratio,random_state=seed)
train_df = pd.concat([train_neg_df,train_pos_df])
valid_df = pd.concat([valid_neg_df,valid_pos_df])
test_df = pd.concat([test_neg_df,test_pos_df])
train_data = box_df[box_df.Path.isin(train_df.source.to_list())]
valid_data = box_df[box_df.Path.isin(valid_df.source.to_list())]
test_data = box_df[box_df.Path.isin(test_df.source.to_list())]
train_df['spleen_injury'] = np.array([0 if i else 1 for i in train_df.spleen_injury_class.isna().tolist()])
valid_df['spleen_injury'] = np.array([0 if i else 1 for i in valid_df.spleen_injury_class.isna().tolist()])
test_df['spleen_injury'] = np.array([0 if i else 1 for i in test_df.spleen_injury_class.isna().tolist()])
if save_file:
test_df_output = pd.merge(test_data.loc[:,['ID','Path','BBox','Posibility']],test_df,left_on='Path',right_on='source',suffixes = ['','_x'])
        valid_df_output = pd.merge(valid_data.loc[:,['ID','Path','BBox','Posibility']],valid_df,left_on='Path',right_on='source',suffixes = ['','_x'])
test_df_output = test_df_output.drop(['ID_x'],axis=1)
valid_df_output = valid_df_output.drop(['ID_x'],axis=1)
test_df_output = test_df_output.loc[:,test_df_output.columns[~test_df_output.columns.str.contains('Unnamed')]]
valid_df_output = valid_df_output.loc[:,valid_df_output.columns[~valid_df_output.columns.str.contains('Unnamed')]]
valid_df_output.to_csv(f'{save_file}/fold{fold}_valid.csv',index = False)
test_df_output.to_csv(f'{save_file}/fold{fold}_test.csv',index = False)
if cropping:
train_data_dicts = []
for index,row in train_data.iterrows():
image = row['Path']
label = row['BBox']
train_data_dicts.append({'image':image,'label':label})
valid_data_dicts = []
for index,row in valid_data.iterrows():
image = row['Path']
label = row['BBox']
valid_data_dicts.append({'image':image,'label':label})
test_data_dicts = []
for index,row in test_data.iterrows():
image = row['Path']
label = row['BBox']
test_data_dicts.append({'image':image,'label':label})
else:
train_data_dicts =[
{"image": image_name, "label": label_name}
for image_name, label_name in zip([i for i in train_df.source.tolist()], [i for i in train_df.spleen_injury.tolist()] )
]
valid_data_dicts =[
{"image": image_name, "label": label_name}
for image_name, label_name in zip([i for i in valid_df_output.source.tolist()], [i for i in valid_df_output.spleen_injury.tolist()] )
]
test_data_dicts =[
{"image": image_name, "label": label_name}
for image_name, label_name in zip([i for i in test_df_output.source.tolist()], [i for i in test_df_output.spleen_injury.tolist()] )
]
return train_data_dicts, valid_data_dicts, test_data_dicts
class FocalLoss(nn.Module):
def __init__(self, class_num, alpha=None, gamma=2, size_average=True):
"""
        Focal loss: -α(1-yi)**γ * ce_loss(xi, yi)
        A step-by-step implementation of the focal loss.
        :param alpha: class-weight α. If a list, it gives per-class weights; if a scalar, the weights become [α, 1-α, 1-α, ...]. Commonly used in detection to down-weight the background class (RetinaNet uses 0.25).
        :param gamma: focusing parameter γ that down-weights easy samples (RetinaNet uses 2).
        :param num_classes: number of classes.
        :param size_average: if True return the mean loss, otherwise the sum.
"""
super(FocalLoss, self).__init__()
        if alpha is None:   # alpha is the class-balancing factor
self.alpha = Variable(torch.ones(class_num, 1))
else:
if isinstance(alpha, list):
self.alpha = torch.Tensor(alpha)
else:
self.alpha = torch.zeros(class_num)
self.alpha[0] += alpha
self.alpha[1:] += (1-alpha)
        self.gamma = gamma   # focusing exponent
        self.class_num = class_num   # number of classes
        self.size_average = size_average   # whether the returned loss is averaged
def forward(self, preds, labels):
"""
        Compute the focal loss.
        :param preds: predicted logits, size [B, N, C] (detection) or [B, C] (classification); B batch, N boxes, C classes.
        :param labels: ground-truth classes, size [B, N] or [B].
:return:
"""
# assert preds.dim()==2 and labels.dim()==1
preds = preds.view(-1,preds.size(-1))
self.alpha = self.alpha.to(preds.device)
        preds_softmax = F.softmax(preds, dim=1)   # softmax (not log_softmax) because the probabilities are reused below
        preds_softmax = preds_softmax.clamp(min=0.0001,max=1.0)   # clamp tiny values so the log does not produce a NaN loss
        preds_logsoft = torch.log(preds_softmax)
        preds_softmax = preds_softmax.gather(1,labels.view(-1,1))   # this implements nll_loss (cross entropy = log_softmax + nll)
        preds_logsoft = preds_logsoft.gather(1,labels.view(-1,1))
        self.alpha = self.alpha.gather(0,labels.view(-1))
        loss = -torch.mul(torch.pow((1-preds_softmax), self.gamma), preds_logsoft)  # torch.pow((1-preds_softmax), self.gamma) is the (1-pt)**γ term of the focal loss
loss = torch.mul(self.alpha, loss.t())
if self.size_average:
loss = loss.mean()
else:
loss = loss.sum()
return loss
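# Minimal usage sketch for FocalLoss: random logits for a binary problem; the
# alpha/gamma values and batch size are illustrative assumptions only, and
# _focal_loss_demo is a hypothetical helper.
def _focal_loss_demo():
    criterion = FocalLoss(class_num=2, alpha=0.25, gamma=2)
    logits = torch.randn(8, 2)            # [B, C]
    targets = torch.randint(0, 2, (8,))   # [B]
    return criterion(logits, targets)     # scalar tensor (mean over the batch)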
|
houhsein/Spleen_injury_detection
|
classification/utils/training_torch_utils.py
|
training_torch_utils.py
|
py
| 19,509 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18537216469
|
# prob_link: https://www.codingninjas.com/codestudio/problems/majority-element-ii_8230738?challengeSlug=striver-sde-challenge&leftPanelTab=0
from math import *
from collections import *
from sys import *
from os import *
def majorityElementII(arr):
n = len(arr)
# Write your code here.
mp = {}
for x in arr:
if x not in mp:
mp[x] = 1
else:
mp[x] += 1
ans = []
for key, val in mp.items():
if val > floor(n / 3):
ans.append(key)
return ans
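# Quick check with an illustrative input: n = 7, so the threshold is floor(7/3) = 2
# and only values occurring more than twice are returned.
if __name__ == "__main__":
    print(majorityElementII([1, 1, 2, 2, 3, 3, 3]))   # -> [3]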
|
Red-Pillow/Strivers-SDE-Sheet-Challenge
|
P16_Majority Element-II.py
|
P16_Majority Element-II.py
|
py
| 549 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14188272016
|
from fastapi import FastAPI
app = FastAPI()
COLUMN_NAME = "name"
COLUMN_ID = "id"
FAKE_DB = [
{"id": 1, "name": "Vladimir"},
{"id": 2, "name": "Polina"},
{"id": 3, "name": "Aleksander"}
]
def find_friend_name(friend_id, db_name):
for row in db_name:
if row.get(COLUMN_ID) == friend_id:
return row.get(COLUMN_NAME)
return None
@app.get("/friends/{friend_id}")
async def get_friend_name(friend_id: int):
friend_name = find_friend_name(friend_id, FAKE_DB)
if friend_name is None:
return {"error": f"No such friend with id {friend_id}"}
return {"friend_name": friend_name}
@app.get("/")
async def root():
return {"message": "Hello world!"}
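# Local sanity check (illustrative only, not part of the service itself): exercising
# the routes with FastAPI's TestClient; assumes the httpx test dependency is installed.
if __name__ == "__main__":
    from fastapi.testclient import TestClient
    client = TestClient(app)
    assert client.get("/").json() == {"message": "Hello world!"}
    assert client.get("/friends/2").json() == {"friend_name": "Polina"}
    assert "error" in client.get("/friends/99").json()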
|
DanilaLabydin/Python-tasks-solving-practice
|
app/main.py
|
main.py
|
py
| 715 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5005445920
|
from __future__ import annotations
from pathlib import Path
from typing import Any, cast
import _testutils
import pytest
from lxml.html import (
HtmlElement as HtmlElement,
find_class,
find_rel_links,
iterlinks,
make_links_absolute,
parse,
resolve_base_href,
rewrite_links,
)
reveal_type = getattr(_testutils, "reveal_type_wrapper")
def test_input_content_type(h_filepath: Path) -> None:
fio = open(h_filepath, "rb")
tree = parse(h_filepath)
for bad_input in [h_filepath, fio, tree]:
with pytest.raises(
AttributeError, match="object has no attribute 'find_rel_links'"
):
_ = find_rel_links(cast(Any, bad_input), "stylesheet")
fio.close()
links = find_rel_links(str(h_filepath), "stylesheet")
reveal_type(links)
assert links == find_rel_links(tree.getroot(), "stylesheet")
assert links == find_rel_links(h_filepath.read_text(), "stylesheet")
assert links == find_rel_links(h_filepath.read_bytes(), "stylesheet")
def test_find_class(h_filepath: Path) -> None:
elems = find_class(h_filepath.read_text(), "single")
reveal_type(elems)
for e in elems:
reveal_type(e)
def test_iterlinks(h_filepath: Path) -> None:
results = iterlinks(h_filepath.read_text())
reveal_type(results)
for r in results:
assert len(r) == 4
reveal_type(r[0])
reveal_type(r[1])
reveal_type(r[2])
reveal_type(r[3])
class TestOutputType:
BASE = "http://dummy.link"
def test_make_links_absolute(self, h_filepath: Path) -> None:
in_data1 = h_filepath.read_bytes()
with pytest.raises(
TypeError, match="No base_url given, and the document has no base_url"
):
out_data1 = make_links_absolute(in_data1)
out_data1 = make_links_absolute(in_data1, self.BASE)
assert type(in_data1) == type(out_data1)
in_data2 = h_filepath.read_text()
with pytest.raises(TypeError, match="Cannot mix str and non-str"):
out_data2 = make_links_absolute(
in_data2, cast(Any, self.BASE.encode("ascii"))
)
out_data2 = make_links_absolute(in_data2, self.BASE)
assert type(in_data2) == type(out_data2)
tree = parse(h_filepath)
in_data3 = tree.getroot()
out_data3 = make_links_absolute(in_data3, self.BASE)
assert type(in_data3) == type(out_data3)
def test_resolve_base_href(self, h_filepath: Path) -> None:
in_data1 = h_filepath.read_bytes()
out_data1 = resolve_base_href(in_data1)
assert type(in_data1) == type(out_data1)
in_data2 = h_filepath.read_text()
out_data2 = resolve_base_href(in_data2)
assert type(in_data2) == type(out_data2)
tree = parse(h_filepath)
in_data3 = tree.getroot()
out_data3 = resolve_base_href(in_data3)
assert type(in_data3) == type(out_data3)
def test_rewrite_links(self, h_filepath: Path) -> None:
in_data1 = h_filepath.read_bytes()
out_data1 = rewrite_links(in_data1, lambda _: self.BASE)
assert type(in_data1) == type(out_data1)
in_data2 = h_filepath.read_text()
with pytest.raises(TypeError, match="can only concatenate str"):
out_data2 = rewrite_links(
in_data2, lambda _: cast(Any, self.BASE.encode("ASCII"))
)
out_data2 = rewrite_links(in_data2, lambda _: self.BASE)
assert type(in_data2) == type(out_data2)
tree = parse(h_filepath)
in_data3 = tree.getroot()
out_data3 = rewrite_links(in_data3, lambda _: None)
assert type(in_data3) == type(out_data3)
|
abelcheung/types-lxml
|
test-rt/test_html_link_funcs.py
|
test_html_link_funcs.py
|
py
| 3,706 |
python
|
en
|
code
| 23 |
github-code
|
6
|
14565937194
|
# Check if a given parentheses string is valid
#
# Input: par: string
# Output: true or false: bool
#
# We need a stack to store opening braces
# We need a map to store types of braces
#
# Check the length of the string, if the length is odd, return False
# Loop through the list, for each char,
# - If it is an opening brace, add it to the stack
# - If it is not an opening brace,
# -- If the stack is empty, return False
# -- Pop the stack and get the next opening brace
# --- If char is not equal to value at map with key opening brace , return False
# Return whether the stack is empty:
# - if there's an item left when the loop is done, the string is clearly unbalanced
def is_valid(par_str: str) -> bool:
stack = []
par_map = {"{": "}", "[": "]", "(": ")"}
if len(par_str) % 2 != 0:
return False
for char in par_str:
if char in par_map.keys():
stack.append(char)
else:
if len(stack) == 0:
return False
open_brac = stack.pop()
if char != par_map[open_brac]:
return False
return stack == []
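# A few quick checks of the algorithm described above (inputs are illustrative).
if __name__ == "__main__":
    assert is_valid("{[()]}") is True
    assert is_valid("([)]") is False   # wrong closing order
    assert is_valid("((") is False     # leftover opening braces
    assert is_valid("") is True        # empty string is trivially balanced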
|
HemlockBane/ds_and_algo
|
stacks/study_questions.py
|
study_questions.py
|
py
| 1,124 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14098998919
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
===================================
Timer --- Create a timer decorator.
===================================
Largely this module was simply practice on writing decorators.
Might need to review logging best practices. I don't want the logger from
this module to emit anything, but it seems tedious to place that burden
on any module that imports from here.
.. seealso::
:mod:`cProfile`
:mod:`pstats`
:mod:`timeit`
:magic:`timeit`
"""
import datetime
import functools
import logging
from os import scandir
from runpy import run_path
import time
from timeit import Timer
from IPython.core.getipython import get_ipython
# noinspection PyProtectedMember
from IPython.core.magics.execution import _format_time as format_delta
logging.basicConfig(level=logging.INFO)
def timer(func):
"""Print the runtime of the decorated function.
Utilizes `time.perf_counter`.
.. todo:: Begin using the :mod:`timeit` module.
There are more specialized ways of profiling things in
other modules; however, this works for a rough estimate.
Parameters
----------
func : function
Function to profile
Returns
-------
    wrapper_timer : function
        The wrapped function, which logs its runtime and returns `func`'s value.
"""
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter()
value = func(*args, **kwargs)
end_time = time.perf_counter()
run_time = end_time - start_time
logging.info(f"Finished {func.__name__!r} in {run_time:.4f} secs")
return value
return wrapper_timer
# class ModuleTimer()
# I mean while we're practicing decorators throw this in the mix
def debug(func):
"""Print the function signature and return value"""
@functools.wraps(func)
def wrapper_debug(*args, **kwargs):
args_repr = [repr(a) for a in args] # 1
kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()] # 2
signature = ", ".join(args_repr + kwargs_repr) # 3
print(f"Calling {func.__name__}({signature})")
value = func(*args, **kwargs)
print(f"{func.__name__!r} returned {value!r}") # 4
return value
return wrapper_debug
def exc_timer(statement, setup=None):
"""A non-decorator implementation that uses `timeit`."""
t = Timer(stmt=statement, setup=setup) # outside the try/except
try:
return t.timeit()
except Exception: # noqa E722
t.print_exc()
class ArgReparser:
"""Class decorator that echoes out the arguments a function was called with."""
def __init__(self, func):
"""Initialize the reparser with the function it wraps."""
self.func = func
def __call__(self, *args, **kwargs):
print("entering function " + self.func.__name__)
i = 0
for arg in args:
print("arg {0}: {1}".format(i, arg))
i = i + 1
return self.func(*args, **kwargs)
def time_dir(directory=None):
"""How long does it take to exec(compile(file)) every file in the startup dir?"""
if directory is None:
directory = get_ipython().startup_dir
result = []
    for i in scandir(directory):
if i.name.endswith(".py"):
            file = i.path  # full path so files outside the cwd can be opened
print(file)
print(time.time())
start_time = time.time()
exec(compile(open(file).read(), "timer", "exec"))
end = time.time()
diff = end - start_time
print(f"{diff}")
result.append((file, diff))
return result
class LineWatcher:
"""Class that implements a basic timer.
Registers the `start` and `stop` methods with the IPython events API.
"""
def __init__(self):
"""Define the classes start_time parameter."""
self.start_time = self.start()
def start(self):
"""Return `time.time`."""
return time.time()
def __repr__(self):
return f"{self.__class__.__name__} {self.start_time}"
def stop(self):
"""Determine the difference between start time and end time."""
stop_time = time.time()
diff = abs(stop_time - self.start_time)
print("time: {}".format(format_delta(diff)))
return diff
def load_ipython_extension(ip=None, line_watcher=None):
"""Initialize a `LineWatcher` and register start and stop with IPython."""
if ip is None:
ip = get_ipython()
if ip is None:
return
if line_watcher is None:
line_watcher = LineWatcher()
ip.events.register("pre_run_cell", line_watcher.start)
ip.events.register("post_run_cell", line_watcher.stop)
def unload_ipython_extension(ip=None, line_watcher=None):
if ip is None:
ip = get_ipython()
if ip is None:
return
if line_watcher is None:
line_watcher = LineWatcher()
ip.events.unregister("pre_run_cell", line_watcher.start)
ip.events.unregister("post_run_cell", line_watcher.stop)
|
farisachugthai/dynamic_ipython
|
default_profile/util/timer.py
|
timer.py
|
py
| 5,023 |
python
|
en
|
code
| 7 |
github-code
|
6
|
77938817
|
"""
file structure:
flip_labels_and_scans.py
scan_directrory - raw scans folder
label_directrory - labels folder
save_dir_scan - flipped scans folder (where they will be saved)
save_dir_labels - flipped labels folder (where they will be saved)
This script flips nii (NIfTI) labels and scans along the sagittal plane. The plane the flipping occurs on can be modified by changing the transformation matrix in the flip3dlabel and flip3dscan functions.
This script assumes the following file naming conventions:
scans: "scanIdentifier_somesuffix.nii"
labels: "scanIdentifier_50um_segmentation_IE-label.nii", the suffix can be modified by altering "label_name" in the "scan_flip_iterator" function.
note that scanIdentifier should be unique.
"""
#imports
import numpy as np
import SimpleITK as sitk
import os
#specify directory of scans you would like to flip
scan_directrory = 'scan_154um'
#specify directory of where labelmaps are
label_directrory = 'lab'
#specify directory where you want to save flipped scans
save_dir_scan = 'scan_save'
#specify directory where you want to save flipped labels
save_dir_labels = 'lab_save'
def get_center(img):
"""
This function returns the physical center point of a 3d sitk image
:param img: The sitk image we are trying to find the center of
:return: The physical center point of the image
"""
width, height, depth = img.GetSize()
return img.TransformIndexToPhysicalPoint((int(np.ceil(width/2)),
int(np.ceil(height/2)),
int(np.ceil(depth/2))))
def flip3dlabel(img):
"""
    This function flips the sitk label passed to it using nearest-neighbour (NN) interpolation
:param img: An sitk labelmap
:return: The flipped label
"""
affineTrans = sitk.AffineTransform(3)
image_center = get_center(img)
affineTrans.SetMatrix([-1,0,0,0,1,0,0,0,1])
affineTrans.SetCenter(image_center)
flipped = sitk.Resample(img, affineTrans,sitk.sitkNearestNeighbor)
return flipped
def flip3dscan(img,lab):
"""
    This function flips the sitk image passed to it using BSpline interpolation
:param img: An sitk image
:param lab: An sitk label associated with the given image - used to maintain alignment
:return: The flipped image
"""
affineTrans = sitk.AffineTransform(3)
image_center = get_center(lab)
affineTrans.SetMatrix([-1,0,0,0,1,0,0,0,1])
affineTrans.SetCenter(image_center)
interpolator = sitk.sitkBSpline
flipped = sitk.Resample(img, img, affineTrans,
interpolator, -2000)
return flipped
def label_flip_iterator(file):
"""
This function is called each time a label is flipped. Naming and saving is done here.
:param file: filename of label
"""
prefix = file.split("_")[0] #get the sample prefix IE '1932L'
name_without_filetype = file.split(".nii")[0] #file name before the extension (.nii)
newname = name_without_filetype+"_flipped.nii"
lab = sitk.ReadImage(label_directrory+'/'+file)
flipped_lab = flip3dlabel(lab)
sitk.WriteImage(flipped_lab,save_dir_labels+"/"+newname)#labels are saved with _flipped appended to their original names
def scan_flip_iterator(file):
"""
This function is called each time a scan is flipped. Naming and saving is done here.
:param file: filename of scan
"""
prefix = file.split("_")[0] #get the scan prefix IE '1932L'
name_without_filetype = file.split(".nii")[0] #everything before the extension (.nii)
newname = name_without_filetype+"_flipped.nii"
label_name = prefix+"_50um_segmentation_IE-label_flipped.nii" #labels corresponding to scans need this naming convention following prefix
im = sitk.ReadImage(scan_directrory+"/"+file)
lab = sitk.ReadImage(save_dir_labels+'/'+label_name)
flipped_im = flip3dscan(im,lab) #flip the image with respect to its already flipped label
sitk.WriteImage(flipped_im,save_dir_scan+"/"+newname) #scans are saved with _flipped appended to their original names
dir=os.listdir(label_directrory)
for i in range(0,len(dir)): #iterate through the directory of labels
label_flip_iterator(dir[i])
dir=os.listdir(scan_directrory)
for i in range(0,len(dir)): #iterate through the directory of raw scans
scan_flip_iterator(dir[i])
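# Example of the resulting file names (hypothetical inputs, shown only to make
# the naming convention from the docstring concrete): a label
# 'lab/1932L_50um_segmentation_IE-label.nii' is written to
# 'lab_save/1932L_50um_segmentation_IE-label_flipped.nii', and a scan
# 'scan_154um/1932L_154um.nii' is written to 'scan_save/1932L_154um_flipped.nii'.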
|
kylerioux/python_ML_scripts
|
3d_image_preprocessing/flip_scans_and_labels.py
|
flip_scans_and_labels.py
|
py
| 4,139 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34214358930
|
import json
# Set file paths
basePath = 'D:\\NTCIR-12_MathIR_arXiv_Corpus\\'
inputPath = basePath + "output_FeatAna\\"
index_file = 'inverse_semantic_index_formula_catalog(physics_all).json'
#basePath = 'D:\\NTCIR-12_MathIR_Wikipedia_Corpus\\'
#inputPath = basePath + "output_RE\\"
#index_file = 'inverse_semantic_index_formula_catalog(Wikipedia).json'
# Load inverse index
with open(inputPath + index_file,'r',encoding='utf8') as f:
formula_index = json.load(f)
# Load example queries
with open('../examples_list/formula_examples.json', 'r', encoding='utf8') as f:
example_queries = json.load(f)
results = {}
for example_query in example_queries:
GoldID = example_query['GoldID']
FormulaName = example_query['formula_name']
# retrieve only results that are common in all query word results
common_results = {}
for query_word in FormulaName.split():
try:
for formula in formula_index[query_word].items():
try:
common_results[formula[0]] += 1
except:
common_results[formula[0]] = 1
except:
pass
ranking = {}
for common_result in common_results.items():
if True: #common_result[1] == len(FormulaName.split()):
for query_word in FormulaName.split():
try:
ranking[common_result[0]] += formula_index[query_word][common_result[0]]
except:
try:
ranking[common_result[0]] = formula_index[query_word][common_result[0]]
except:
pass
result = {k: v for k, v in sorted(ranking.items(), key=lambda item: item[1],reverse=True)}
results[GoldID] = (FormulaName,result)
# output to csv
csv_list = []
csv_list.append("GoldID\tName\tFormula\t(Score,Rank)\tDCG\tnDCG\n")
for result in results.items():
# display only first hits or ranking cutoff
displayed = False
counter = 0
for formula in result[1][1].items():
if counter < 10: # True: #displayed == False:
csv_list.append(result[0] + "\t" + result[1][0] + "\t"
+ formula[0].replace("\t","").replace("\n","") + "\t\t\t\n")
displayed = True
counter += 1
with open("inverse_formula_index_results.csv", 'w', encoding='utf8') as f:
f.writelines(csv_list)
print("end")
|
pratyushshukla19/Minor-Project-2
|
semanticsearch/modes13-15/evaluate_inverse_formula_index.py
|
evaluate_inverse_formula_index.py
|
py
| 2,411 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44855958646
|
"""
Source: https://www.geeksforgeeks.org/dynamic-programming-set-13-cutting-a-rod/
Given a rod of length n inches and an array of prices that contains prices of all pieces of size smaller than n.
Determine the maximum value obtainable by cutting up the rod and selling the pieces.
For example, if length of the rod is 8 and the values of different pieces are given as following,
then the maximum obtainable value is 22 (by cutting in two pieces of lengths 2 and 6)
length | 1 2 3 4 5 6 7 8
--------------------------------------------
price | 1 5 8 9 10 17 17 20
And if the prices are as following, then the maximum obtainable value is 24 (by cutting in eight pieces of length 1)
length | 1 2 3 4 5 6 7 8
--------------------------------------------
price | 3 5 8 9 10 17 17 20
"""
priceDict = {1:3, 2:5, 3:8, 4:9, 5:10, 6:17, 7:17, 8:20}
solutions = {}
def optOfN(n):
if n in solutions.keys():
return solutions[n]
if n == 1:
solutions[1] = priceDict[1]
return solutions[1]
opt = [priceDict[n]]
for i in range(1,int(n/2)+1):
opt.append(optOfN(i)+optOfN(n-i))
solutions[n] = max(opt)
return solutions[n]
optOfN(8)
print(solutions)
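# Sanity check (illustrative, not in the original file): with the prices above,
# the optimum for a rod of length 8 is 24, i.e. eight pieces of length 1 at
# price 3 each, matching the expected answer quoted in the docstring.
assert solutions[8] == 24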
|
sandeepjoshi1910/Algorithms-and-Data-Structures
|
optimal_rod.py
|
optimal_rod.py
|
py
| 1,271 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71476989628
|
import sys
input = sys.stdin.readline
dx = [1, 0]
dy = [0, 1]
T = int(input())
for tc in range(T):
M, N, K = map(int, input().split())
original = []
stack = []
for _ in range(K):
x, y = map(int, input().split())
original.append((x, y))
stack.append((x, y))
for nx, ny in original:
for i in range(2):
if 0 <= nx+dx[i] < M and 0 <= ny+dy[i] < N:
if (nx+dx[i], ny+dy[i]) in original:
if (nx+dx[i], ny+dy[i]) in stack:
idx = stack.index((nx+dx[i], ny+dy[i]))
stack.pop(idx)
else:
if (nx, ny) in stack:
idx = stack.index((nx, ny))
stack.pop(idx)
print(stack)
print(len(stack))
|
YOONJAHYUN/Python
|
BOJ/1012_2.py
|
1012_2.py
|
py
| 831 |
python
|
en
|
code
| 2 |
github-code
|
6
|
73083659707
|
import numpy as np
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
def has_converged(centers, new_centers):
return set([tuple(a) for a in centers]) == set([tuple(a) for a in new_centers])
def kmeans(X, K):
# centroids = X[np.random.choice(X.shape[0], K, replace=False)]
centroids = np.array([[1.0, 1.0], [5.0, 7.0]])
it = 0
while True:
it += 1
D = cdist(X, centroids)
labels = np.argmin(D, axis=1)
new_centroids = np.zeros((K, X.shape[1]))
for k in range(K):
new_centroids[k, :] = np.mean(X[labels == k, :], axis=0)
display(X, K, labels)
plt.show()
if has_converged(centroids, new_centroids):
break
centroids = new_centroids
return labels, centroids
def display(X, K, labels):
for i in range(K):
X0 = X[labels == i, :]
plt.plot(X0[:, 0], X0[:, 1], '.')
def error(X, K, labels):
sum = 0
for i in range(K):
X0 = X[labels == i, :]
sum += np.std(X0)
print(sum / K)
def random_data():
for i in range(6):
mean = 200 * np.random.random_sample((1, 2))
X0 = np.random.multivariate_normal(mean[0], [[10, 0], [0, 10]], np.random.randint(20, 50))
if i == 0:
X = X0
else:
X = np.concatenate((X, X0))
return X
from sklearn.cluster import KMeans
A = np.array([[1.0, 1.5, 3.0, 5.0, 3.5, 4.5, 3.5]])
B = np.array([[1.0, 2.0, 4.0, 7.0, 5.0, 5.0, 4.5]])
X = np.append(A.T, B.T, axis=1)
# X = random_data()
for K in range(2, 10):
(labels, centroids) = kmeans(X, K)
display(X, K, labels)
plt.show()
error(X, K, labels)
cls = KMeans(n_clusters=K, random_state=0)
cls.fit(X)
lbl = cls.labels_
display(X, K, lbl)
plt.show()
|
cuongdd2/cs582
|
lab6/prob4.py
|
prob4.py
|
py
| 1,807 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41149275833
|
""" Internet Validators
- ValidateEmail
- ValidateIP
- ValidateURL
"""
import socket
import re
from email_validator import validate_email, EmailNotValidError
from flask_validator import Validator
class ValidateEmail(Validator):
""" Validate Email type.
Check if the new value is a valid e-mail.
Using this library to validate https://github.com/JoshData/python-email-validator
Args:
field: SQLAlchemy column to validate
        allow_smtputf8: (bool) Set to False to prohibit internationalized addresses that would require the SMTPUTF8 extension.
        check_deliverability: (bool) Set to False to skip the domain name resolution check.
        allow_empty_local: (bool) Set to True to allow an empty local part (i.e. @example.com),
            e.g. for validating Postfix aliases.
        allow_null: (bool) Allow null values
throw_exception: (bool) Throw a ValidateError if the validation fails
"""
allow_smtputf8 = True
check_deliverability = True
allow_empty_local = False
def __init__(self, field, allow_smtputf8=True,check_deliverability=True, allow_empty_local=False,
allow_null=True, throw_exception=False, message=None):
self.allow_smtputf8 = allow_smtputf8
self.check_deliverability = check_deliverability
self.allow_empty_local = allow_empty_local
Validator.__init__(self, field, allow_null, throw_exception, message)
def check_value(self, value):
try:
validate_email(value,
allow_smtputf8=self.allow_smtputf8,
check_deliverability=self.check_deliverability,
allow_empty_local=self.allow_empty_local)
return True
except EmailNotValidError:
return False
class ValidateIP(Validator):
""" Validate Regex
Compare a value against a regular expresion
Args:
field: SQLAlchemy column to validate
ipv6: Match against IPV6
allow_null: (bool) Allow null values
throw_exception: (bool) Throw a ValidateError if the validation fails
"""
ipv6 = None
def __init__(self, field, ipv6=False, allow_null=True, throw_exception=False, message=None):
self.ipv6 = ipv6
Validator.__init__(self, field, allow_null, throw_exception, message)
def check_value(self, value):
try:
if not self.ipv6:
socket.inet_pton(socket.AF_INET, value)
else:
socket.inet_pton(socket.AF_INET6, value)
return True
except socket.error:
return False
class ValidateURL(Validator):
""" Validate URL
    Check if the value is a valid URL
Args:
field: SQLAlchemy column to validate
allow_null: (bool) Allow null values. Default True
throw_exception: (bool) Throw a ValidateError if the validation fails
"""
regex = r'^[a-z]+://(?P<host>[^/:]+)(?P<port>:[0-9]+)?(?P<path>\/.*)?$'
def check_value(self, value):
if re.match(self.regex, value):
return True
else:
return False
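# Illustrative usage (a minimal sketch following flask_validator conventions;
# the `User` model and its columns below are hypothetical, not part of this module):
#
#     class User(db.Model):
#         id = db.Column(db.Integer, primary_key=True)
#         email = db.Column(db.String(120))
#         last_ip = db.Column(db.String(45))
#         homepage = db.Column(db.String(255))
#
#         @classmethod
#         def __declare_last__(cls):
#             # Validators attach themselves to SQLAlchemy events once mappings exist.
#             ValidateEmail(User.email, allow_null=False, throw_exception=True)
#             ValidateIP(User.last_ip, ipv6=True)
#             ValidateURL(User.homepage)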
|
xeBuz/Flask-Validator
|
flask_validator/constraints/internet.py
|
internet.py
|
py
| 3,190 |
python
|
en
|
code
| 28 |
github-code
|
6
|
20662954004
|
# Imports
from math import pi
pi * 71 / 223
from math import sin
sin(pi/2)
# Function values
max
max(3, 4)
f = max
f
f(3, 4)
max = 7
f(3, 4)
f(3, max)
f = 2
# f(3, 4)
# User-defined functions
from operator import add, mul
add(2, 3)
mul(2, 3)
def square(x):
return mul(x, x)
square(21)
|
Thabhelo/CS7
|
lab/lab1/code03.py
|
code03.py
|
py
| 295 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18555731856
|
import Adafruit_DHT
from main.SQL import TempHandler
import time
class Temps():
def __init__(self):
self.sensor = Adafruit_DHT.DHT11
self.pin = 17
self.humidity = 0
self.temperature = 0
def getDHT(self):
self.humidity, self.temperature = Adafruit_DHT.read_retry(self.sensor, self.pin)
dic ={}
if self.humidity is not None and self.temperature is not None:
dic['temperature'] = self.temperature
dic['humidity'] = self.humidity
temp = TempHandler.Temp(temperature=self.temperature, humidity=self.humidity)
TempHandler.insert(temp)
return dic
else:
return 'Failed to get reading. Try again!'
|
chinazkk/raspberry_car
|
main/Temps.py
|
Temps.py
|
py
| 736 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39204707046
|
"""
题目介绍:剑指 Offer 48. 最长不含重复字符的子字符串
请从字符串中找出一个最长的不包含重复字符的子字符串,计算该最长子字符串的长度
"""
def length_of_longest_substring(s):
dic = {}
res = tmp = 0
for j in range(len(s)):
        # Look up the last index at which s[j] appeared (-1 if never seen)
i = dic.get(s[j], -1)
        # Record the current index of s[j] in the hash map
dic[s[j]] = j
tmp = tmp + 1 if tmp < j - i else j - i
res = max(res, tmp)
return res
s='abcdefgabcdabcikghjk'
print(length_of_longest_substring(s))
def singleNumber(nums):
    """Return the element that appears once when every other element appears
    exactly three times, by counting each bit position modulo 3 over a 32-bit
    two's-complement representation."""
    counts = [0] * 32
    for num in nums:
        for j in range(32):
            counts[j] += num & 1
            num >>= 1
    res, m = 0, 3
    for i in range(32):
        res <<= 1
        res |= counts[31 - i] % m
    # If the sign bit survives the mod-3 count, the answer is negative: convert
    # the unsigned 32-bit pattern back to a negative Python int.
    return res if counts[31] % m == 0 else ~(res ^ 0xffffffff)
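# Illustrative checks (not in the original file): every element except one
# appears exactly three times; the bit-count-mod-3 trick recovers the lone
# element, including negatives via the sign correction above.
print(singleNumber([3, 4, 3, 3]))     # 4
print(singleNumber([-2, -2, 1, -2]))  # 1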
|
Davidhfw/algorithms
|
python/dp/48_lengthOfLongestSubstring.py
|
48_lengthOfLongestSubstring.py
|
py
| 856 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74866931066
|
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import os
class HobsHeader(object):
sim_head = '"SIMULATED EQUIVALENT"'
obs_head = '"OBSERVED VALUE"'
obs_name = '"OBSERVATION NAME"'
date = 'DATE'
dyear = 'DECIMAL_YEAR'
header = {sim_head: None,
obs_head: None,
obs_name: None,
date: None,
dyear: None}
class HobsOut(dict):
"""
Reads output data from Hobs file and prepares it for post processing.
Class sets observations to an ordered dictionary based on observation name
    If the observation name is consistent for a site, a time series is created
for plotting!
Parameters
----------
filename : str
hobs filename
strip_after : str
flag to indicate a character to strip the hobs label after for
grouping wells.
Example: OBS_1
OBS_2
strip_after could be set to "_" and then all OBS observations will
be stored under the OBS key. This is extremely useful for plotting
and calculating statistics
"""
def __init__(self, filename, strip_after=""):
super(HobsOut, self).__init__()
self.name = filename
self._strip_after = strip_after
self._dataframe = None
self.__read_hobs_output()
def __read_hobs_output(self):
"""
Method to read a hobs output file. Dynamically sets header information
and reads associated values.
Sets values to HobsOut dictionary
"""
with open(self.name) as hobout:
for ix, line in enumerate(hobout):
if ix == 0:
self.__set_header(line)
else:
self.__set_dictionary_values(line)
def __set_dictionary_values(self, line):
"""
Method to set incoming hobs line to dictionary data values
Args:
line: (str)
"""
t = line.strip().split()
obsname = t[HobsHeader.header[HobsHeader.obs_name]]
dict_name = obsname
if self._strip_after:
dict_name = obsname.split(self._strip_after)[0]
simval = float(t[HobsHeader.header[HobsHeader.sim_head]])
obsval = float(t[HobsHeader.header[HobsHeader.obs_head]])
residual = simval - obsval
date = self.__set_datetime_object(t[HobsHeader.header[HobsHeader.date]])
decimal_date = float(t[HobsHeader.header[HobsHeader.dyear]])
if dict_name in self:
self[dict_name]['simval'].append(simval)
self[dict_name]['obsval'].append(obsval)
self[dict_name]['date'].append(date)
self[dict_name]['decimal_date'].append(decimal_date)
self[dict_name]['residual'].append(residual)
self[dict_name]["obsname"].append(obsname)
else:
self[dict_name] = {"obsname": [obsname], "date": [date],
"decimal_date": [decimal_date],
"simval": [simval], "obsval": [obsval],
"residual": [residual]}
def __set_header(self, line):
"""
Reads header line and sets header index
Parameters
----------
line : str
first line of the HOB file
"""
n = 0
s = ""
for i in line:
s += i
if s in HobsHeader.header:
HobsHeader.header[s] = n
n += 1
s = ""
elif s in (" ", "\t", "\n"):
s = ""
else:
pass
for key, value in HobsHeader.header.items():
if value is None:
raise AssertionError("HobsHeader headings must be updated")
def __set_datetime_object(self, s):
"""
Reformats a string of YYYY-mm-dd to a datetime object
Parameters
----------
s : str
string of YYYY-mm-dd
Returns
-------
datetime.date
"""
return dt.datetime.strptime(s, "%Y-%m-%d")
def __get_date_string(self, date):
"""
        Parameters
----------
date: datetime.datetime object
Returns
-------
string
"""
return date.strftime("%Y/%m/%d")
@property
def obsnames(self):
"""
Return a list of obsnames from the HobsOut dictionary
"""
return self.keys()
def to_dataframe(self):
"""
Method to get a pandas dataframe object of the
HOBs data.
Returns
-------
pd.DataFrame
"""
import pandas as pd
if self._dataframe is None:
df = None
for hobsname, d in self.items():
t = pd.DataFrame(d)
if df is None:
df = t
else:
df = pd.concat([df, t], ignore_index=True)
self._dataframe = df
return self._dataframe
def get_sum_squared_errors(self, obsname):
"""
Returns the sum of squared errors from the residual
Parameters
----------
obsname : str
observation name
Returns
-------
float: sum of square error
"""
return sum([i**2 for i in self[obsname]['residual']])
def get_rmse(self, obsname):
"""
Returns the RMSE from the residual
Parameters
----------
obsname : str
observation name
Returns
-------
float: rmse
"""
return np.sqrt(np.mean([i**2 for i in self[obsname]['residual']]))
def get_number_observations(self, obsname):
"""
Returns the number of observations for an obsname
Parameters
----------
obsname : str
observation name
Returns
-------
int
"""
return len(self[obsname]['simval'])
def get_maximum_residual(self, obsname):
"""
Returns the datetime.date and maximum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, residual)
"""
data = self[obsname]['residual']
index = data.index(max(data))
date = self[obsname]['date'][index]
return date, max(data)
def get_minimum_residual(self, obsname):
"""
Returns the datetime.date, minimum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, residual)
"""
data = self[obsname]['residual']
index = data.index(min(data))
date = self[obsname]['date'][index]
return date, min(data)
def get_mean_residual(self, obsname):
"""
        Returns the mean residual value
Parameters
----------
obsname : str
observation name
Returns
-------
        float: mean residual
"""
data = self[obsname]['residual']
return np.mean(data)
def get_median_residual(self, obsname):
"""
        Returns the median residual value
Parameters
----------
obsname : str
observation name
Returns
-------
        float: median residual
"""
data = self[obsname]['residual']
return np.median(data)
def get_maximum_residual_heads(self, obsname):
"""
Returns the datetime.date, simulated, and observed
heads at the maximum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, simulated head, observed head)
"""
resid = self[obsname]['residual']
index = resid.index(max(resid))
observed = self[obsname]['obsval'][index]
simulated = self[obsname]['simval'][index]
date = self[obsname]['date'][index]
return date, simulated, observed
def get_minimum_residual_heads(self, obsname):
"""
Returns the datetime.date, simulated, and observed
        heads at the minimum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, simulated head, observed head)
"""
resid = self[obsname]['residual']
index = resid.index(min(resid))
observed = self[obsname]['obsval'][index]
simulated = self[obsname]['simval'][index]
date = self[obsname]['date'][index]
return date, simulated, observed
def get_residual_bias(self, filter=None):
"""
Method to determine the bias of measurements +-
by checking the residual. Returns fraction of residuals
> 0.
Parameters
----------
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false to use
Returns
-------
(float) fraction of residuals greater than zero
"""
nobs = 0.
ngreaterzero = 0.
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
residual = np.array(meta_data['residual'])
rgreaterzero = sum((residual > 0))
nobs += residual.size
ngreaterzero += rgreaterzero
try:
bias = ngreaterzero / nobs
except ZeroDivisionError:
raise ZeroDivisionError("No observations found!")
return bias
def write_dbf(self, dbfname, filter=None):
"""
Method to write a dbf file from a the HOBS dictionary
Parameters
----------
dbfname : str
dbf file name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
"""
import shapefile
data = []
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
for ix, val in enumerate(meta_data['simval']):
data.append([obsname,
self.__get_date_string(meta_data['date'][ix]),
val,
meta_data['obsval'][ix],
meta_data['residual'][ix]])
try:
# traps for pyshp 1 vs. pyshp 2
w = shapefile.Writer(dbf=dbfname)
except Exception:
w = shapefile.Writer()
w.field("HOBSNAME", fieldType="C")
w.field("HobsDate", fieldType="D")
w.field("HeadSim", fieldType='N', decimal=8)
w.field("HeadObs", fieldType="N", decimal=8)
w.field("Residual", fieldType="N", decimal=8)
for rec in data:
w.record(*rec)
try:
w.save(dbf=dbfname)
except AttributeError:
w.close()
def write_min_max_residual_dbf(self, dbfname, filter=None):
"""
Method to write a dbf of transient observations
using observation statistics
Parameters
----------
dbfname : str
dbf file name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
"""
import shapefile
data = []
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
max_date, resid_max = self.get_maximum_residual(obsname)
min_date, resid_min = self.get_minimum_residual(obsname)
simval_max, obsval_max = self.get_maximum_residual_heads(obsname)[1:]
simval_min, obsval_min = self.get_minimum_residual_heads(obsname)[1:]
data.append([obsname,
self.get_number_observations(obsname),
self.__get_date_string(max_date), resid_max,
self.__get_date_string(min_date), resid_min,
simval_max, obsval_max, simval_min, obsval_min])
try:
# traps for pyshp 1 vs. pyshp 2
w = shapefile.Writer(dbf=dbfname)
except Exception:
w = shapefile.Writer()
w.field("HOBSNAME", fieldType="C")
w.field("FREQUENCY", fieldType="N")
w.field("MaxDate", fieldType="C")
w.field("MaxResid", fieldType='N', decimal=8)
w.field("MinDate", fieldType="C", decimal=8)
w.field("MinResid", fieldType="N", decimal=8)
w.field("MaxHeadSim", fieldType="N", decimal=8)
w.field("MaxHeadObs", fieldType="N", decimal=8)
w.field("MinHeadSim", fieldType="N", decimal=8)
w.field("MinHeadObs", fieldType="N", decimal=8)
for rec in data:
w.record(*rec)
try:
w.save(dbf=dbfname)
except AttributeError:
w.close()
def __filter(self, obsname, filter):
"""
        Boolean filtering method, checks if observation name
is in the filter.
Parameters
----------
obsname : str
observation name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
Returns
-------
bool: True if obsname in filter
"""
if filter is None:
return False
elif isinstance(filter, list) or isinstance(filter, tuple):
            if obsname in filter:
return True
elif isinstance(filter, str):
if obsname == filter:
return True
elif callable(filter):
if filter(obsname):
return True
else:
raise Exception("Filter is not an appropriate type")
return False
def write_summary_statistics_csv(self, csvname, filter=None):
"""
Method to write summary calibration statistics to a
CSV file for analysis and reports
Parameters
----------
csvname : str
csv file name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
"""
data = []
header = ["Well name", "Average", "Median",
"Minimum", "Maximum", "RMSE ft", "Frequency"]
for obsname, meta_data in sorted(self.items()):
if self.__filter(obsname, filter):
continue
resid_mean = self.get_mean_residual(obsname)
resid_median = self.get_median_residual(obsname)
resid_max = self.get_maximum_residual(obsname)[-1]
resid_min = self.get_minimum_residual(obsname)[-1]
rmse = self.get_rmse(obsname)
frequency = self.get_number_observations(obsname)
data.append((obsname, resid_mean, resid_median,
resid_min, resid_max, rmse, frequency))
data = np.array(data, dtype=[('id', 'O'), ('mean', float),
('med', float), ('min', float),
('max', float), ('rmse', float),
                                     ('num', int)])
with open(csvname, "w") as foo:
foo.write(",".join(header) + "\n")
np.savetxt(foo, data, fmt="%15s,%.2f,%2f,%2f,%2f,%2f,%d")
def plot(self, obsname, *args, **kwargs):
"""
Plotting functionality from the hobs dictionary
Parameters
----------
obsname: str
hobs package observation name
*args: matplotlib args
**kwargs: matplotlib kwargs
Returns
-------
matplotlib.pyplot.axes object
"""
simulated = True
if "observed" in kwargs:
simulated = False
kwargs.pop('observed')
observed = True
if "simulated" in kwargs:
observed = False
kwargs.pop('simulated')
if obsname not in self:
raise AssertionError("Obsname {}: not valid".format(obsname))
axes = False
if 'ax' in kwargs:
ax = kwargs.pop('ax')
axes = True
if not axes:
ax = plt.subplot(111)
obsval = self[obsname]['obsval']
simval = self[obsname]['simval']
date = self[obsname]['date']
if observed:
kwargs['label'] = "Observed"
kwargs['color'] = 'r'
ax.plot(date, obsval, *args, **kwargs)
if simulated:
kwargs['label'] = "Simulated"
kwargs['color'] = 'b'
ax.plot(date, simval, *args, **kwargs)
return ax
def plot_measured_vs_simulated(self, filter=None, **kwargs):
"""
Plots measured vs. simulated data along a 1:1 profile.
Parameters
----------
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
**kwargs: matplotlib.pyplot plotting kwargs
Returns
-------
matplotlib.pyplot.axes object
"""
axes = plt.subplot(111)
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
simulated = meta_data['simval']
observed = meta_data['obsval']
axes.plot(observed, simulated, 'bo', markeredgecolor='k')
return axes
def plot_simulated_vs_residual(self, filter=None,
histogram=False, **kwargs):
"""
Creates a matplotlib plot of simulated heads vs residual
Parameters
----------
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
histogram: (bool)
Boolean variable that defines either a scatter plot (False)
or a histogram (True) of residuals
**kwargs: matplotlib.pyplot plotting kwargs
Returns
-------
matplotlib.pyplot.axes object
"""
axes = plt.subplot(111)
if not histogram:
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
residual = meta_data['residual']
observed = meta_data['obsval']
axes.plot(observed, residual, 'bo', markeredgecolor="k")
else:
bins = np.arange(-25, 26, 5)
d = {}
for ix, abin in enumerate(bins):
frequency = 0
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
for residual in meta_data['residual']:
if ix == 0:
if residual < abin:
frequency += 1
elif ix == (len(bins) - 1):
if residual > abin:
frequency += 1
else:
if bins[ix - 1] <= residual < abin:
frequency += 1
if ix == 0:
name = "Less than {}".format(abin)
elif ix == (len(bins) - 1):
name = "Greater than {}".format(abin)
else:
name = "{} to {}".format(bins[ix - 1] + 1, abin)
d[ix + 1] = {'name': name,
'frequency': frequency}
tick_num = []
tick_name = []
for index, meta_data in sorted(d.items()):
axes.bar(index, meta_data['frequency'], width=0.8,
**kwargs)
tick_num.append(index)
tick_name.append(meta_data['name'])
plt.xticks(tick_num, tick_name, rotation=45, fontsize=10)
plt.xlim([0.5, len(tick_num) + 1])
plt.subplots_adjust(left=0.12, bottom=0.22,
right=0.90, top=0.90,
wspace=0.20, hspace=0.20)
plt.ylabel("Frequency")
return axes
if __name__ == "__main__":
ws = r'C:\Users\jlarsen\Desktop\Lucerne\Lucerne_OWHM\V0_initial_from_MODOPTIM\output'
hobs_name = "hobs.out"
tmp = HobsOut(os.path.join(ws, hobs_name))
tmp.plot("04N01W01R04S", "o-")
plt.legend(loc=0, numpoints=1)
plt.show()
print('break')
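    # The `strip_after` option groups related observation names under a single
    # key, as described in the class docstring (hypothetical illustration; the
    # workspace path above is site-specific):
    #     grouped = HobsOut(os.path.join(ws, hobs_name), strip_after="_")
    #     grouped.write_summary_statistics_csv("hobs_summary.csv")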
|
jlarsen-usgs/HydrographTools
|
hobs_output.py
|
hobs_output.py
|
py
| 22,180 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33837428124
|
import array
import struct
import sys
from collections import namedtuple
import plotly.express as px
import numpy as np
from scipy.ndimage import uniform_filter1d
from statsmodels.nonparametric.smoothers_lowess import lowess
import matplotlib.pyplot as plt
from math import degrees, atan
import scipy.signal
TYPE_DIGITAL = 0
TYPE_ANALOG = 1
expected_version = 0
AnalogData = namedtuple('AnalogData', ('begin_time', 'sample_rate', 'downsample', 'num_samples', 'samples'))
def parse_analog(f):
# Parse header
identifier = f.read(8)
if identifier != b"<SALEAE>":
raise Exception("Not a saleae file")
version, datatype = struct.unpack('=ii', f.read(8))
if version != expected_version or datatype != TYPE_ANALOG:
raise Exception("Unexpected data type: {}".format(datatype))
# Parse analog-specific data
begin_time, sample_rate, downsample, num_samples = struct.unpack('=dqqq', f.read(32))
# Parse samples
samples = array.array("f")
samples.fromfile(f, num_samples)
return AnalogData(begin_time, sample_rate, downsample, num_samples, samples)
if __name__ == '__main__':
times = []
volts = []
anchor = 0
filename = sys.argv[1]
print("Opening " + filename)
with open(filename, 'rb') as f:
data = parse_analog(f)
# Print out all analog data
print("Begin time: {}".format(data.begin_time))
print("Sample rate: {}".format(data.sample_rate))
print("Downsample: {}".format(data.downsample))
print("Number of samples: {}".format(data.num_samples))
j = 0
for idx, voltage in enumerate(data.samples):
sample_num = idx * data.downsample
#thing/(thing/sec) = thing*(sec/thing) = sec
time = data.begin_time + (float(sample_num) / data.sample_rate)
times.append(time)
volts.append(min(voltage,1.3345))
j = j + 1
volts = scipy.ndimage.median_filter(volts, int((data.sample_rate/data.downsample)*.002)+1)
#volts = uniform_filter1d(volts, size=int((data.sample_rate/data.downsample)*.002))
"""
filtered = lowess(volts, times, frac=0.0005)
plt.plot(filtered[:, 0], filtered[:, 1], 'r-', linewidth=3)
plt.show()
"""
upper_bound = lower_bound = volts[0]
for i in range(0,int(data.num_samples*.2)):
upper_bound = max(upper_bound, volts[i])
lower_bound = min(lower_bound, volts[i])
v_noise = .0
sample_size = .3
slope_range = int(data.num_samples*.05)
temp_threshold = 0.0
angle_threshold = 30.0
tslope_range = 10
"""
for s in range(100,11000,100):
i = 0
while i < int(data.num_samples*sample_size):
l_b = max(i-s,0)
r_b = min(i+s,data.num_samples)
v_noise = volts[r_b] - volts[l_b]
if temp_threshold <= abs(degrees(atan(v_noise/((times[r_b]-times[l_b]))))):
temp_threshold = abs(degrees(atan(v_noise/((times[r_b]-times[l_b])))))
print("({},{})({},{})".format(times[l_b], volts[l_b], times[r_b], volts[r_b]))
i = i + 1
print("Temp Threshold: {}".format(temp_threshold))
if temp_threshold < angle_threshold:
angle_threshold = temp_threshold
slope_range = s
"""
print("Angle Threshold: {}".format(angle_threshold))
start = 0
state = 0
#red is horizontal, b is rise, green is fall
colors = ['r','b','g']
i = 1
angle_threshold = 1
slope_range = int(data.num_samples*.002)
while i < data.num_samples:
l_b = max(i-slope_range,0)
r_b = min(i+slope_range,data.num_samples-1)
v_noise = volts[r_b] - volts[l_b]
angle = degrees(atan(v_noise/((times[r_b]-times[l_b]))))
if abs(angle) <= angle_threshold and state != 0:
#print("Horizontal line detected: {}\n".format(angle))
plt.plot(times[start:i], volts[start:i], colors[state])
state = 0
start = i
elif angle > angle_threshold and state != 1:
#print("Rise detected: {}\n".format(angle))
plt.plot(times[start:i], volts[start:i], colors[state])
state = 1
start = i
elif angle < -angle_threshold and state != 2:
#print("Descent detected: {}\n".format(angle))
plt.plot(times[start:i], volts[start:i], colors[state])
state = 2
start = i
i = i + 1
plt.plot(times[start:i], volts[start:i], colors[state])
#plt.plot(times, volts)
plt.show()
|
nkelly1322/analog_analysis
|
AnalogAnalysis.py
|
AnalogAnalysis.py
|
py
| 4,559 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74732381948
|
import numpy as np
import tensorflow as tf
import cv2
def colormap_jet(img):
color_image = cv2.applyColorMap(np.uint8(img), cv2.COLORMAP_JET)
return color_image
def color_disparity(disparity):
with tf.variable_scope('color_disparity'):
batch_size = disparity.shape[0]
color_maps = []
for i in range(batch_size):
color_disp = tf.py_func(colormap_jet, [-disparity[i]], tf.uint8)
color_maps.append(color_disp)
color_batch = tf.stack(color_maps, axis=0)
return color_batch
def count_text_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
return len(lines)
|
fabiotosi92/monoResMatch-Tensorflow
|
utils.py
|
utils.py
|
py
| 677 |
python
|
en
|
code
| 117 |
github-code
|
6
|
21916066362
|
# coding=utf-8
### Binary tree
# Tree node
class Node(object):
def __init__(self,elem=-1, lchild =None,rchild = None):
        # Node value
self.elem = elem
        # Left child
self.lchild = lchild
        # Right child
self.rchild = rchild
# Tree
class Tree(object):
def __init__(self):
self.root = Node()
self.myQueue = []
def add(self,elem):
node = Node(elem)
        if self.root.elem == -1: # if the tree is empty, assign the new node to the root
self.root = node
self.myQueue.append(self.root)
else:
            treeNode = self.myQueue[0] # attach the new node as a child of this node
if treeNode.lchild == None:
treeNode.lchild = node
self.myQueue.append(treeNode.lchild)
else:
print(list(f.elem for f in self.myQueue))
treeNode.rchild=node
self.myQueue.append(treeNode.rchild)
                self.myQueue.pop(0) # this node now has a right child, so drop it from the queue
    ### Recursive pre-order traversal
def front_digui(self,root):
if root == None:
return
print(root.elem)
self.front_digui(root.lchild)
self.front_digui(root.rchild)
    ### Recursive in-order traversal
def middle_digui(self,root):
if root == None:
return
self.middle_digui(root.lchild)
print(root.elem)
self.middle_digui(root.rchild)
    ### Recursive post-order traversal
def lafter_digui(self,root):
if root == None:
return
        self.lafter_digui(root.lchild)
        self.lafter_digui(root.rchild)
print(root.elem)
    ### Level-order traversal using a queue
def level_queue(self,root):
if root == None:
return
myQueue=[]
node = root
myQueue.append(node)
while myQueue:
node=myQueue.pop(0)
print(node.elem)
if node.lchild != None:
myQueue.append(node.lchild)
if node.rchild != None:
myQueue.append(node.rchild)
    ### Pre-order traversal using a stack
def front_stack(self,root):
if root == None:
return
myStack = []
node = root
while node or myStack:
while node:
print(node.elem)
myStack.append(node)
node = node.lchild
node = myStack.pop()
node = node.rchild
    ### In-order traversal using a stack
def middle_stack(self, root):
if root == None:
return
myStack = []
node = root
while node or myStack:
            while node:  # starting from the root, keep walking down the left subtree
                myStack.append(node)
                node = node.lchild
            node = myStack.pop()  # inner loop done: the previous node has no more left children
            print(node.elem)  # visit the node between its left and right subtrees (in-order)
            node = node.rchild  # move on to its right subtree
    ## Post-order traversal using two stacks
def later_stack(self,root):
if root == None:
return
myStack1 = []
myStack2 = []
node = root
myStack1.append(node)
        while myStack1: # build the reverse of the post-order sequence in myStack2
node = myStack1.pop()
if node.lchild:
myStack1.append(node.lchild)
if node.rchild:
myStack1.append(node.rchild)
myStack2.append(node)
        while myStack2: # popping myStack2 yields the post-order sequence
print(myStack2.pop().elem)
if __name__ == '__main__':
pass
# """主函数"""
# elems = range(10) #生成十个数据作为树节点
# tree = Tree() #新建一个树对象
# for elem in elems:
# tree.add(elem) #逐个添加树的节点
#
#
# print ('\n\n递归实现先序遍历:')
# # tree.front_digui(tree.root)
# # tree.level_queue(tree.root)
# # print(tree.root.elem)
# # print(list(f.elem for f in tree.myQueue))
# tree.middle_digui(tree.root)
|
DC-Joney/Machine-Learning
|
Arithmetic/nodetree.py
|
nodetree.py
|
py
| 4,247 |
python
|
en
|
code
| 1 |
github-code
|
6
|
24698015874
|
##
# The model uses elements from both the Transformer Encoder as introduced in
# “Attention is All You Need” (https://arxiv.org/pdf/1706.03762.pdf) and the
# Message Passing Neural Network (MPNN) as described in "Neural Message Passing
# for Quantum Chemistry" paper (https://arxiv.org/pdf/1704.01212.pdf) .
#
# The overall architecture most closely resembles the Transformer Encoder with
# stacked encoder blocks and layers connected through residual connections with
# layer norm. In this case however the encoder blocks are build up of two
# message passing layers, followed by three different types of attention layers
# with a final pointwise feed-forward network.
#
# Both message passing layers use a slightly modified version of the edge
# networks as detailed in the MPNN paper. The first layer allows message passing
# between bonded atoms, whereas the second layer does so for the atom pairs for
# which we need to predict the scalar coupling constant. Unlike the attention
# layers the message passing layers' parameters are tied across blocks.
#
# The three attention layers are:
# 1. distance based gaussian attention
# 2. graph distance based attention
# 3. scaled dot product self attention
#
# Although the final layers in the block resemble the encoder blocks of the
# Transformer model, there are several additional layers designed specifically
# to capture the structure and relationships among atoms in a molecule.
#
# Much of the code is adopted from the annotated version of the Transformer
# paper, which can be found here
# (http://nlp.seas.harvard.edu/2018/04/03/attention.html).
import math
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from fcnet import FullyConnectedNet, hidden_layer
from scatter import scatter_mean
from layernorm import LayerNorm
def clones(module, N):
"""Produce N identical layers."""
return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super().__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"""Apply residual connection to any sublayer with the same size."""
return x + self.dropout(sublayer(self.norm(x)))
def _gather_nodes(x, idx, sz_last_dim):
idx = idx.unsqueeze(-1).expand(-1, -1, sz_last_dim)
return x.gather(1, idx)
class ENNMessage(nn.Module):
"""
The edge network message passing function from the MPNN paper. Optionally
    adds an additional cosine-angle-based attention mechanism over incoming
messages.
"""
PAD_VAL = -999
def __init__(self, d_model, d_edge, kernel_sz, enn_args={}, ann_args=None):
super().__init__()
assert kernel_sz <= d_model
self.d_model, self.kernel_sz = d_model, kernel_sz
self.enn = FullyConnectedNet(d_edge, d_model*kernel_sz, **enn_args)
if ann_args: self.ann = FullyConnectedNet(1, d_model, **ann_args)
else: self.ann = None
def forward(self, x, edges, pairs_idx, angles=None, angles_idx=None, t=0):
"""Note that edges and pairs_idx raw inputs are for a unidirectional
graph. They are expanded to allow bidirectional message passing."""
if t==0:
self.set_a_mat(edges)
if self.ann: self.set_attn(angles)
# concat reversed pairs_idx for bidirectional message passing
self.pairs_idx = torch.cat([pairs_idx, pairs_idx[:,:,[1,0]]], dim=1)
return self.add_message(torch.zeros_like(x), x, angles_idx)
def set_a_mat(self, edges):
n_edges = edges.size(1)
a_vect = self.enn(edges)
a_vect = a_vect / (self.kernel_sz ** .5) # rescale
mask = edges[:,:,0,None].expand(a_vect.size())==self.PAD_VAL
a_vect = a_vect.masked_fill(mask, 0.0)
self.a_mat = a_vect.view(-1, n_edges, self.d_model, self.kernel_sz)
# concat a_mats for bidirectional message passing
self.a_mat = torch.cat([self.a_mat, self.a_mat], dim=1)
def set_attn(self, angles):
angles = angles.unsqueeze(-1)
self.attn = self.ann(angles)
mask = angles.expand(self.attn.size())==self.PAD_VAL
self.attn = self.attn.masked_fill(mask, 0.0)
def add_message(self, m, x, angles_idx=None):
"""Add message for atom_{i}: m_{i} += sum_{j}[attn_{ij} A_{ij}x_{j}]."""
# select the 'x_{j}' feeding into the 'm_{i}'
x_in = _gather_nodes(x, self.pairs_idx[:,:,1], self.d_model)
# do the matrix multiplication 'A_{ij}x_{j}'
if self.kernel_sz==self.d_model: # full matrix multiplcation
ax = (x_in.unsqueeze(-2) @ self.a_mat).squeeze(-2)
else: # do a convolution
x_padded = F.pad(x_in, self.n_pad)
x_unfolded = x_padded.unfold(-1, self.kernel_sz, 1)
ax = (x_unfolded * self.a_mat).sum(-1)
# apply atttention
if self.ann:
n_pairs = self.pairs_idx.size(1)
# average all attn(angle_{ijk}) per edge_{ij}.
# i.e.: attn_{ij} = sum_{k}[attn(angle_{ijk})] / n_angles_{ij}
ave_att = scatter_mean(self.attn, angles_idx, num=n_pairs, dim=1,
out=torch.ones_like(ax))
ax = ave_att * ax
# sum up all 'A_{ij}h_{j}' per node 'i'
idx_0 = self.pairs_idx[:,:,0,None].expand(-1, -1, self.d_model)
return m.scatter_add(1, idx_0, ax)
@property
def n_pad(self):
k = self.kernel_sz
return (k // 2, k // 2 - int(k % 2 == 0))
class MultiHeadedDistAttention(nn.Module):
"""Generalizes the euclidean and graph distance based attention layers."""
def __init__(self, h, d_model):
super().__init__()
self.d_model, self.d_k, self.h = d_model, d_model // h, h
self.attn = None
self.linears = clones(nn.Linear(d_model, d_model), 2)
def forward(self, dists, x, mask):
batch_size = x.size(0)
x = self.linears[0](x).view(batch_size, -1, self.h, self.d_k)
x, self.attn = self.apply_attn(dists, x, mask)
x = x.view(batch_size, -1, self.h * self.d_k)
return self.linears[-1](x)
def apply_attn(self, dists, x, mask):
attn = self.create_raw_attn(dists, mask)
attn = attn.transpose(-2,-1).transpose(1, 2)
x = x.transpose(1, 2)
x = torch.matmul(attn, x)
x = x.transpose(1, 2).contiguous()
return x, attn
def create_raw_attn(self, dists, mask):
pass
class MultiHeadedGraphDistAttention(MultiHeadedDistAttention):
"""Attention based on an embedding of the graph distance matrix."""
MAX_GRAPH_DIST = 10
def __init__(self, h, d_model):
super().__init__(h, d_model)
self.embedding = nn.Embedding(self.MAX_GRAPH_DIST+1, h)
def create_raw_attn(self, dists, mask):
emb_dists = self.embedding(dists)
mask = mask.unsqueeze(-1).expand(emb_dists.size())
emb_dists = emb_dists.masked_fill(mask==0, -1e9)
return F.softmax(emb_dists, dim=-2).masked_fill(mask==0, 0)
class MultiHeadedEuclDistAttention(MultiHeadedDistAttention):
"""Attention based on a parameterized normal pdf taking a molecule's
euclidean distance matrix as input."""
def __init__(self, h, d_model):
super().__init__(h, d_model)
self.log_prec = nn.Parameter(torch.Tensor(1, 1, 1, h))
self.locs = nn.Parameter(torch.Tensor(1, 1, 1, h))
nn.init.normal_(self.log_prec, mean=0.0, std=0.1)
nn.init.normal_(self.locs, mean=0.0, std=1.0)
def create_raw_attn(self, dists, mask):
dists = dists.unsqueeze(-1).expand(-1, -1, -1, self.h)
z = torch.exp(self.log_prec) * (dists - self.locs)
pdf = torch.exp(-0.5 * z ** 2)
return pdf / pdf.sum(dim=-2, keepdim=True).clamp(1e-9)
def attention(query, key, value, mask=None, dropout=None):
"""Compute 'Scaled Dot Product Attention'."""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None: scores = scores.masked_fill(mask==0, -1e9)
p_attn = F.softmax(scores, dim=-1).masked_fill(mask==0, 0)
if dropout is not None: p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
class MultiHeadedSelfAttention(nn.Module):
"""Applies self-attention as described in the Transformer paper."""
def __init__(self, h, d_model, dropout=0.1):
super().__init__()
self.d_model, self.d_k, self.h = d_model, d_model // h, h
self.attn = None
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.dropout = nn.Dropout(p=dropout) if dropout > 0.0 else None
def forward(self, x, mask):
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
batch_size = x.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [
l(x).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)
for l in self.linears[:3]
]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask, self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, -1, self.d_model)
return self.linears[-1](x)
class AttendingLayer(nn.Module):
"""Stacks the three attention layers and the pointwise feedforward net."""
def __init__(self, size, eucl_dist_attn, graph_dist_attn, self_attn, ff,
dropout):
super().__init__()
self.eucl_dist_attn = eucl_dist_attn
self.graph_dist_attn = graph_dist_attn
self.self_attn = self_attn
self.ff = ff
self.subconns = clones(SublayerConnection(size, dropout), 4)
self.size = size
def forward(self, x, eucl_dists, graph_dists, mask):
eucl_dist_sub = lambda x: self.eucl_dist_attn(eucl_dists, x, mask)
x = self.subconns[0](x, eucl_dist_sub)
graph_dist_sub = lambda x: self.graph_dist_attn(graph_dists, x, mask)
x = self.subconns[1](x, graph_dist_sub)
self_sub = lambda x: self.self_attn(x, mask)
x = self.subconns[2](x, self_sub)
return self.subconns[3](x, self.ff)
class MessagePassingLayer(nn.Module):
"""Stacks the bond and scalar coupling pair message passing layers."""
def __init__(self, size, bond_mess, sc_mess, dropout, N):
super().__init__()
self.bond_mess = bond_mess
self.sc_mess = sc_mess
self.linears = clones(nn.Linear(size, size), 2*N)
self.subconns = clones(SublayerConnection(size, dropout), 2*N)
def forward(self, x, bond_x, sc_pair_x, angles, mask, bond_idx, sc_idx,
angles_idx, t=0):
bond_sub = lambda x: self.linears[2*t](
self.bond_mess(x, bond_x, bond_idx, angles, angles_idx, t))
x = self.subconns[2*t](x, bond_sub)
sc_sub = lambda x: self.linears[(2*t)+1](
self.sc_mess(x, sc_pair_x, sc_idx, t=t))
return self.subconns[(2*t)+1](x, sc_sub)
class Encoder(nn.Module):
"""Encoder stacks N attention layers and one message passing layer."""
def __init__(self, mess_pass_layer, attn_layer, N):
super().__init__()
self.mess_pass_layer = mess_pass_layer
self.attn_layers = clones(attn_layer, N)
self.norm = LayerNorm(attn_layer.size)
def forward(self, x, bond_x, sc_pair_x, eucl_dists, graph_dists, angles,
mask, bond_idx, sc_idx, angles_idx):
"""Pass the inputs (and mask) through each block in turn. Note that for
each block the same message passing layer is used."""
for t, attn_layer in enumerate(self.attn_layers):
x = self.mess_pass_layer(x, bond_x, sc_pair_x, angles, mask,
bond_idx, sc_idx, angles_idx, t)
x = attn_layer(x, eucl_dists, graph_dists, mask)
return self.norm(x)
# After N blocks of message passing and attending, the encoded atom states are
# transferred to the head of the model: a customized feed-forward net for
# predicting the scalar coupling (sc) constant.
# First the relevant pairs of atom states for each sc constant in the batch
# are selected, concatenated and stacked. Also concatenated to the encoded
# states are a set of raw molecule and sc pair specific features. These states
# are fed into a residual block comprised of a dense layer followed by a type
# specific dense layer of dimension 'd_ff' (the same as the dimension used for
# the pointwise feed-forward net).
# The processed states are passed through to a relatively small feed-forward
# net, which predicts each sc contribution separately plus a residual.
# Ultimately, the predictions of these contributions and the residual are summed
# to predict the sc constant.
def create_contrib_head(d_in, d_ff, act, dropout=0.0, layer_norm=True):
layers = hidden_layer(d_in, d_ff, False, dropout, layer_norm, act)
layers += hidden_layer(d_ff, 1, False, 0.0) # output layer
return nn.Sequential(*layers)
class ContribsNet(nn.Module):
"""The feed-forward net used for the sc contribution and final sc constant
predictions."""
N_CONTRIBS = 5
CONTIB_SCALES = [1, 250, 45, 35, 500] # scales used to make the 5 predictions of similar magnitude
def __init__(self, d_in, d_ff, vec_in, act, dropout=0.0, layer_norm=True):
super().__init__()
contrib_head = create_contrib_head(d_in, d_ff, act, dropout, layer_norm)
self.blocks = clones(contrib_head, self.N_CONTRIBS)
def forward(self, x):
ys = torch.cat(
[b(x)/s for b,s in zip(self.blocks, self.CONTIB_SCALES)], dim=-1)
return torch.cat([ys[:,:-1], ys.sum(dim=-1, keepdim=True)], dim=-1)
class MyCustomHead(nn.Module):
"""Joins the sc type specific residual block with the sc contribution
feed-forward net."""
PAD_VAL = -999
N_TYPES = 8
def __init__(self, d_input, d_ff, d_ff_contribs, pre_layers=[],
post_layers=[], act=nn.ReLU(True), dropout=3*[0.], norm=False):
super().__init__()
fc_pre = hidden_layer(d_input, d_ff, False, dropout[0], norm, act)
self.preproc = nn.Sequential(*fc_pre)
fc_type = hidden_layer(d_ff, d_input, False, dropout[1], norm, act)
self.types_net = clones(nn.Sequential(*fc_type), self.N_TYPES)
self.contribs_net = ContribsNet(
d_input, d_ff_contribs, d_ff, act, dropout[2], layer_norm=norm)
def forward(self, x, sc_types):
# stack inputs with a .view for easier processing
x, sc_types = x.view(-1, x.size(-1)), sc_types.view(-1)
mask = sc_types != self.PAD_VAL
x, sc_types = x[mask], sc_types[mask]
x_ = self.preproc(x)
x_types = torch.zeros_like(x)
for i in range(self.N_TYPES):
t_idx = sc_types==i
if torch.any(t_idx): x_types[t_idx] = self.types_net[i](x_[t_idx])
else: x_types = x_types + 0.0 * self.types_net[i](x_) # fake call (only necessary for distributed training - to make sure all processes have gradients for all parameters)
x = x + x_types
return self.contribs_net(x)
class Transformer(nn.Module):
"""Molecule transformer with message passing."""
def __init__(self, d_atom, d_bond, d_sc_pair, d_sc_mol, N=6, d_model=512,
d_ff=2048, d_ff_contrib=128, h=8, dropout=0.1, kernel_sz=128,
enn_args={}, ann_args={}):
super().__init__()
assert d_model % h == 0
self.d_model = d_model
c = copy.deepcopy
bond_mess = ENNMessage(d_model, d_bond, kernel_sz, enn_args, ann_args)
sc_mess = ENNMessage(d_model, d_sc_pair, kernel_sz, enn_args)
eucl_dist_attn = MultiHeadedEuclDistAttention(h, d_model)
graph_dist_attn = MultiHeadedGraphDistAttention(h, d_model)
self_attn = MultiHeadedSelfAttention(h, d_model, dropout)
ff = FullyConnectedNet(d_model, d_model, [d_ff], dropout=[dropout])
message_passing_layer = MessagePassingLayer(
d_model, bond_mess, sc_mess, dropout, N)
attending_layer = AttendingLayer(
d_model, c(eucl_dist_attn), c(graph_dist_attn), c(self_attn), c(ff),
dropout
)
self.projection = nn.Linear(d_atom, d_model)
self.encoder = Encoder(message_passing_layer, attending_layer, N)
self.write_head = MyCustomHead(
2 * d_model + d_sc_mol, d_ff, d_ff_contrib, norm=True)
def forward(self, atom_x, bond_x, sc_pair_x, sc_mol_x, eucl_dists,
graph_dists, angles, mask, bond_idx, sc_idx, angles_idx,
sc_types):
x = self.encoder(
self.projection(atom_x), bond_x, sc_pair_x, eucl_dists, graph_dists,
angles, mask, bond_idx, sc_idx, angles_idx
)
# for each sc constant in the batch select and concat the relevant pairs
# of atom states.
x = torch.cat(
[_gather_nodes(x, sc_idx[:,:,0], self.d_model),
_gather_nodes(x, sc_idx[:,:,1], self.d_model),
sc_mol_x], dim=-1
)
return self.write_head(x, sc_types)
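# Minimal construction sketch (illustrative only; every dimension below is a
# placeholder assumption, not a value taken from the original training setup):
if __name__ == "__main__":
    model = Transformer(
        d_atom=32,       # per-atom input features (assumed)
        d_bond=16,       # per-bond edge features (assumed)
        d_sc_pair=8,     # scalar-coupling pair edge features (assumed)
        d_sc_mol=4,      # molecule-level features concatenated in the head (assumed)
        N=2, d_model=128, d_ff=256, d_ff_contrib=64, h=8,
    )
    print(sum(p.numel() for p in model.parameters()), "parameters")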
|
robinniesert/kaggle-champs
|
model.py
|
model.py
|
py
| 17,738 |
python
|
en
|
code
| 48 |
github-code
|
6
|
17692957276
|
import os
import re
import asyncio
import time
from pyrogram import *
from pyrogram.types import *
from random import choice
from Heroku import cloner, ASSUSERNAME, BOT_NAME
from Heroku.config import API_ID, API_HASH
IMG = ["https://telegra.ph/file/cefd3211a5acdcd332415.jpg", "https://telegra.ph/file/30d743cea510c563af6e3.jpg", "https://telegra.ph/file/f7ae22a1491f530c05279.jpg", "https://telegra.ph/file/2f1c9c98452ae9a958f7d.jpg"]
MESSAGE = "Heya! I'm a music bot hoster/Cloner\n\nI can Host Your Bot On My Server within seconds\n\nTry /clone Token from @botfather"
@cloner.on_message(filters.private & filters.command("dkkdej"))
async def hello(client, message: Message):
buttons = [
[
InlineKeyboardButton("✘ ᴜᴘᴅᴀᴛᴇꜱ ᴄʜᴀɴɴᴇʟ", url="t.me/TheUpdatesChannel"),
],
[
InlineKeyboardButton("✘ ꜱᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ", url="t.me/TheSupportChat"),
],
]
reply_markup = InlineKeyboardMarkup(buttons)
await client.send_photo(message.chat.id, f"{choice(IMG)}", caption=MESSAGE, reply_markup=reply_markup)
##Copy from here
# © By Itz-Zaid — please keep the credits if you reuse this.
@cloner.on_message(filters.private & filters.command("clone"))
async def clone(bot, msg: Message):
    chat = msg.chat
    text = await msg.reply("Usage:\n\n /clone token")
    cmd = msg.command
    if len(cmd) < 2:
        return  # no token supplied; the usage message above is the reply
    phone = cmd[1]
    try:
        await text.edit("Loading... ")
        # change this directory according to your repo
        client = Client(":memory:", API_ID, API_HASH, bot_token=phone, plugins={"root": "Heroku.modules"})
        await client.start()
        user = await client.get_me()
        await msg.reply(f"Successfully started @{user.username}! ✅ \n\n Now add your bot and the assistant @{ASSUSERNAME} to your group!")
    except Exception as e:
        await msg.reply(f"**ERROR:** `{str(e)}`\nSend /start again.")
#End
##This code fit with every pyrogram Codes just import then @Client Xyz!
|
Amahocaam/SmokeX
|
Heroku/plugins/clone.py
|
clone.py
|
py
| 2,063 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26436839942
|
from PIL import Image
imgx = 512
imgy = 512
image = Image.new("RGB",(imgx,imgy))
for x in range(imgx):
for y in range(imgy):
        # 64-pixel checkerboard: black where the block parities match, red otherwise
        if (x // 64) % 2 == (y // 64) % 2:
            image.putpixel((x, y), (0, 0, 0))
        else:
            image.putpixel((x, y), (250, 0, 0))
image.save("demo_image.png", "PNG")
|
gbroady19/CS550
|
intropil.py
|
intropil.py
|
py
| 334 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26625288006
|
from decimal import Decimal
from django import template
from livesettings import config_value
from product.utils import calc_discounted_by_percentage, find_best_auto_discount
from tax.templatetags import satchmo_tax
register = template.Library()
def sale_price(product):
"""Returns the sale price, including tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_sale_price(product)
else:
return untaxed_sale_price(product)
register.filter('sale_price', sale_price)
def untaxed_sale_price(product):
"""Returns the product unit price with the best auto discount applied."""
discount = find_best_auto_discount(product)
price = product.unit_price
if discount and discount.valid_for_product(product):
price = calc_discounted_by_percentage(price, discount.percentage)
return price
register.filter('untaxed_sale_price', untaxed_sale_price)
def taxed_sale_price(product):
"""Returns the product unit price with the best auto discount applied and taxes included."""
taxer = satchmo_tax._get_taxprocessor()
price = untaxed_sale_price(product)
price = price + taxer.by_price(product.taxClass, price)
return price
register.filter('taxed_sale_price', taxed_sale_price)
def discount_cart_total(cart, discount):
"""Returns the discounted total for this cart, with tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_cart_total(cart, discount)
else:
return untaxed_discount_cart_total(cart, discount)
register.filter('discount_cart_total', discount_cart_total)
def untaxed_discount_cart_total(cart, discount):
"""Returns the discounted total for this cart"""
total = Decimal('0.00')
for item in cart:
total += untaxed_discount_line_total(item, discount)
return total
register.filter('untaxed_discount_cart_total', untaxed_discount_cart_total)
def taxed_discount_cart_total(cart, discount):
"""Returns the discounted total for this cart with taxes included"""
total = Decimal('0.00')
for item in cart:
total += taxed_discount_line_total(item, discount)
return total
register.filter('taxed_discount_cart_total', taxed_discount_cart_total)
def discount_line_total(cartitem, discount):
"""Returns the discounted line total for this cart item, including tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_line_total(cartitem, discount)
else:
return untaxed_discount_line_total(cartitem, discount)
register.filter('discount_line_total', discount_line_total)
def untaxed_discount_line_total(cartitem, discount):
"""Returns the discounted line total for this cart item"""
price = cartitem.line_total
if discount and discount.valid_for_product(cartitem.product):
price = calc_discounted_by_percentage(price, discount.percentage)
return price
register.filter('untaxed_discount_line_total', untaxed_discount_line_total)
def taxed_discount_line_total(cartitem, discount):
"""Returns the discounted line total for this cart item with taxes included."""
price = untaxed_discount_line_total(cartitem, discount)
taxer = satchmo_tax._get_taxprocessor()
price = price + taxer.by_price(cartitem.product.taxClass, price)
return price
register.filter('taxed_discount_line_total', taxed_discount_line_total)
def discount_price(product, discount):
"""Returns the product price with the discount applied, including tax if that is the default.
Ex: product|discount_price:sale
"""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_price(product, discount)
else:
return untaxed_discount_price(product, discount)
register.filter('discount_price', discount_price)
def untaxed_discount_price(product, discount):
"""Returns the product price with the discount applied.
Ex: product|discount_price:sale
"""
up = product.unit_price
if discount and discount.valid_for_product(product):
pcnt = calc_discounted_by_percentage(up, discount.percentage)
return pcnt
else:
return up
register.filter('untaxed_discount_price', untaxed_discount_price)
def taxed_discount_price(product, discount):
"""Returns the product price with the discount applied, and taxes included.
Ex: product|discount_price:sale
"""
price = untaxed_discount_price(product, discount)
taxer = satchmo_tax._get_taxprocessor()
return price + taxer.by_price(product.taxClass, price)
register.filter('taxed_discount_price', taxed_discount_price)
def discount_ratio(discount):
"""Returns the discount as a ratio, making sure that the percent is under 1"""
pcnt = discount.percentage
if pcnt > 1:
pcnt = pcnt/100
return 1-pcnt
register.filter('discount_ratio', discount_ratio)
def discount_saved(product, discount):
"""Returns the amount saved by the discount, including tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_saved(product, discount)
else:
return untaxed_discount_saved(product, discount)
register.filter('discount_saved', discount_saved)
def untaxed_discount_saved(product, discount):
"""Returns the amount saved by the discount"""
if discount and discount.valid_for_product(product):
price = product.unit_price
discounted = untaxed_discount_price(product, discount)
saved = price - discounted
cents = Decimal("0.01")
return saved.quantize(cents)
else:
return Decimal('0.00')
register.filter('untaxed_discount_saved', untaxed_discount_saved)
def taxed_discount_saved(product, discount):
"""Returns the amount saved by the discount, after applying taxes."""
if discount and discount.valid_for_product(product):
price = product.unit_price
discounted = taxed_discount_price(product, discount)
saved = price - discounted
cents = Decimal("0.01")
return saved.quantize(cents)
else:
return Decimal('0.00')
register.filter('taxed_discount_saved', taxed_discount_saved)
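# Illustrative template usage for the filters above (a sketch; the context
# variables `product`, `cart` and `sale` are assumptions):
#
#   {% load satchmo_discounts %}
#   {{ product|discount_price:sale }}      {# unit price with `sale` applied #}
#   {{ cart|discount_cart_total:sale }}    {# discounted cart total #}
#   {{ product|discount_saved:sale }}      {# amount saved by `sale` #}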
|
dokterbob/satchmo
|
satchmo/apps/product/templatetags/satchmo_discounts.py
|
satchmo_discounts.py
|
py
| 6,222 |
python
|
en
|
code
| 30 |
github-code
|
6
|
18769293531
|
from random import randrange
with open("in0210_2.txt","w") as f:
for _ in range(20):
W,H = randrange(1,31),randrange(1,31)
f.writelines("%d %d\n"%(W,H))
arr = ["".join("....##ENWSX"[randrange(11)] for _ in range(W)) for _ in range(H)]
arr[0] = "".join("##X"[randrange(3)] for _ in range(W))
arr[-1] = "".join("##X"[randrange(3)] for _ in range(W))
for i in range(1,H-1):
arr[i] = "##X"[randrange(3)] + arr[i][1:-1] + "##X"[randrange(3)]
for n in arr:
f.writelines(n+"\n")
f.writelines("%d %d\n"%(0,0))
|
ehki/AOJ_challenge
|
python/0210_2.py
|
0210_2.py
|
py
| 590 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32644908087
|
"""Guide Eye 01 module"""
from functools import partial
from mgear.shifter.component import guide
from mgear.core import transform, pyqt
from mgear.vendor.Qt import QtWidgets, QtCore
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from maya.app.general.mayaMixin import MayaQDockWidget
from . import settingsUI as sui
# guide info
AUTHOR = "Jeremie Passerin, Miquel Campos"
URL = ", www.miquletd.com"
EMAIL = ", "
VERSION = [1, 0, 0]
TYPE = "eye_01"
NAME = "eye"
DESCRIPTION = "eye control rig"
##########################################################
# CLASS
##########################################################
class Guide(guide.ComponentGuide):
"""Component Guide Class"""
compType = TYPE
compName = NAME
description = DESCRIPTION
author = AUTHOR
url = URL
email = EMAIL
version = VERSION
def postInit(self):
"""Initialize the position for the guide"""
self.save_transform = ["root", "look"]
def addObjects(self):
"""Add the Guide Root, blade and locators"""
# eye guide
self.root = self.addRoot()
vTemp = transform.getOffsetPosition(self.root, [0, 0, 1])
self.look = self.addLoc("look", self.root, vTemp)
centers = [self.root, self.look]
self.dispcrv = self.addDispCurve("crv", centers)
def addParameters(self):
"""Add the configurations settings"""
self.pUpVDir = self.addEnumParam(
"upVectorDirection", ["X", "Y", "Z"], 1)
self.pIkRefArray = self.addParam("ikrefarray", "string", "")
self.pUseIndex = self.addParam("useIndex", "bool", False)
self.pParentJointIndex = self.addParam(
"parentJointIndex", "long", -1, None, None)
##########################################################
# Setting Page
##########################################################
class settingsTab(QtWidgets.QDialog, sui.Ui_Form):
"""The Component settings UI"""
def __init__(self, parent=None):
super(settingsTab, self).__init__(parent)
self.setupUi(self)
class componentSettings(MayaQWidgetDockableMixin, guide.componentMainSettings):
"""Create the component setting window"""
def __init__(self, parent=None):
self.toolName = TYPE
        # Delete old instances of the component settings window.
pyqt.deleteInstances(self, MayaQDockWidget)
super(self.__class__, self).__init__(parent=parent)
self.settingsTab = settingsTab()
self.setup_componentSettingWindow()
self.create_componentControls()
self.populate_componentControls()
self.create_componentLayout()
self.create_componentConnections()
def setup_componentSettingWindow(self):
self.mayaMainWindow = pyqt.maya_main_window()
self.setObjectName(self.toolName)
self.setWindowFlags(QtCore.Qt.Window)
self.setWindowTitle(TYPE)
self.resize(350, 350)
def create_componentControls(self):
return
def populate_componentControls(self):
"""Populate Controls
Populate the controls values from the custom attributes of the
component.
"""
# populate tab
self.tabs.insertTab(1, self.settingsTab, "Component Settings")
# populate component settings
self.settingsTab.upVectorDirection_comboBox.setCurrentIndex(
self.root.attr("upVectorDirection").get())
ikRefArrayItems = self.root.attr("ikrefarray").get().split(",")
for item in ikRefArrayItems:
self.settingsTab.ikRefArray_listWidget.addItem(item)
def create_componentLayout(self):
self.settings_layout = QtWidgets.QVBoxLayout()
self.settings_layout.addWidget(self.tabs)
self.settings_layout.addWidget(self.close_button)
self.setLayout(self.settings_layout)
def create_componentConnections(self):
cBox = self.settingsTab.upVectorDirection_comboBox
cBox.currentIndexChanged.connect(
partial(self.updateComboBox,
self.settingsTab.upVectorDirection_comboBox,
"upVectorDirection"))
self.settingsTab.ikRefArrayAdd_pushButton.clicked.connect(
partial(self.addItem2listWidget,
self.settingsTab.ikRefArray_listWidget,
"ikrefarray"))
self.settingsTab.ikRefArrayRemove_pushButton.clicked.connect(
partial(self.removeSelectedFromListWidget,
self.settingsTab.ikRefArray_listWidget,
"ikrefarray"))
self.settingsTab.ikRefArray_listWidget.installEventFilter(self)
def eventFilter(self, sender, event):
if event.type() == QtCore.QEvent.ChildRemoved:
if sender == self.settingsTab.ikRefArray_listWidget:
self.updateListAttr(sender, "ikrefarray")
return True
else:
return QtWidgets.QDialog.eventFilter(self, sender, event)
def dockCloseEventTriggered(self):
pyqt.deleteInstances(self, MayaQDockWidget)
|
mgear-dev/mgear4
|
release/scripts/mgear/shifter_classic_components/eye_01/guide.py
|
guide.py
|
py
| 5,095 |
python
|
en
|
code
| 209 |
github-code
|
6
|
71617385147
|
import pandas as pd
# Load the original CSV file
df = pd.read_csv('data.csv')
# Calculate the number of rows in each output file
num_rows = len(df) // 10
# Split the dataframe into 10 smaller dataframes
dfs = [df[i*num_rows:(i+1)*num_rows] for i in range(10)]
# Save each dataframe to a separate CSV file
for i, df in enumerate(dfs):
df.to_csv(f'small_file_{i}.csv', index=False)
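# Note: len(df) // 10 silently drops any remainder rows. A sketch that keeps
# every row (assumes numpy, which pandas already depends on):
# import numpy as np
# for i, part in enumerate(np.array_split(df, 10)):
#     part.to_csv(f'small_file_{i}.csv', index=False)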
|
charchitdahal/GameDay-Analytics-Challenge
|
convert.py
|
convert.py
|
py
| 388 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20216292952
|
from model.flyweight import Flyweight
from model.static.database import database
class Name(Flyweight):
def __init__(self,item_id):
#prevents reinitializing
if "_inited" in self.__dict__:
return
self._inited = None
#prevents reinitializing
self.item_id = item_id
cursor = database.get_cursor(
"select * from eveNames where itemID={};".format(self.item_id))
row = cursor.fetchone()
self.item_name = row["itemName"]
self.category_id = row["categoryID"]
self.group_id = row["groupID"]
self.type_id = row["typeID"]
cursor.close()
|
Iconik/eve-suite
|
src/model/static/eve/name.py
|
name.py
|
py
| 655 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40187097853
|
"""https://open.kattis.com/problems/piglatin"""
VOWEL = {'a', 'e', 'i', 'o', 'u', 'y'}
def is_begin_with_consonant(word, vowel=VOWEL):
return word[0] not in vowel
def is_begin_with_vowel(word, vowel=VOWEL):
return word[0] in vowel
def get_next_vowel_index(word, vowel=VOWEL):
index = 0
for i in word:
index += 1
if i in vowel:
return index
text = input('')
text = text.split()
ans = []
for word in text:
if is_begin_with_consonant(word):
index = get_next_vowel_index(word)
new_word = word[index-1:] + word[:index-1] + "ay"
ans.append(new_word)
else:
ans.append(word + "yay")
print(" ".join(ans))
|
roycehoe/algo-practice
|
practice/kattis/2/piglatin.py
|
piglatin.py
|
py
| 691 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40527670685
|
from django import forms
from django.forms import TextInput, SplitDateTimeWidget
class NumberInput(TextInput):
"""
HTML5 Number input
Left for backwards compatibility
"""
input_type = 'number'
class AdminDateWidget(forms.DateInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=["admin/js/%s" % path for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'form-control datepicker',
'size': '10', 'type': 'date'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=["admin/js/%s" % path for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'form-control timepicker',
'size': '8', 'type': 'time'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format)
class LteAdminSplitDateTime (forms.SplitDateTimeWidget):
#template_name = 'admin/widgets/split_datetime.html'
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
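# Hypothetical form usage sketch (the form and field names are assumptions):
# from django import forms
# class EventForm(forms.Form):
#     day = forms.DateField(widget=AdminDateWidget())
#     starts_at = forms.SplitDateTimeField(widget=LteAdminSplitDateTime())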
|
ricardochaves/django-adminlte
|
adminlte/widgets.py
|
widgets.py
|
py
| 1,631 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12646834769
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
df = pd.read_csv('dividedsamples/training.csv')
dfval = pd.read_csv('dividedsamples/testing.csv')
train_features = df.copy()
test_features = dfval.copy()
train_labels = train_features.pop('price')
test_labels = test_features.pop('price')
regressor = LinearRegression()
regressor.fit(train_features, train_labels)
coeff_df = pd.DataFrame(regressor.coef_, train_features.columns, columns=['Coefficient'])
print(coeff_df)
y_pred = regressor.predict(test_features)
boi = pd.DataFrame({'Actual': test_labels, 'Predicted': y_pred})
print(boi)
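# Optional error metrics for the fit above (a sketch; uses scikit-learn, which
# this script already depends on):
# from sklearn import metrics
# print('MAE:', metrics.mean_absolute_error(test_labels, y_pred))
# print('MSE:', metrics.mean_squared_error(test_labels, y_pred))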
|
WayneFerrao/autofocus
|
linreg.py
|
linreg.py
|
py
| 717 |
python
|
en
|
code
| 2 |
github-code
|
6
|
3026345716
|
# -*- coding: utf-8
# Testing facet-sphere interaction in periodic case.
# Pass, if the sphere is rolling from left to right through the period.
from woo import utils
from math import radians  # radians() is used below for the friction angle
sphereRadius=0.1
tc=0.001# collision time
en=0.3 # normal restitution coefficient
es=0.3 # tangential restitution coefficient
density=2700
frictionAngle=radians(35)
params=utils.getViscoelasticFromSpheresInteraction(tc,en,es)
facetMat=O.materials.append(ViscElMat(frictionAngle=frictionAngle,**params))
sphereMat=O.materials.append(ViscElMat(density=density,frictionAngle=frictionAngle,**params))
#floor
n=5  # integer, so it can be used with range() below
s=1./n
for i in range(0,n):
for j in range(0,n):
O.bodies.append([
utils.facet( [(i*s,j*s,0.1),(i*s,(j+1)*s,0.1),((i+1)*s,(j+1)*s,0.1)],material=facetMat),
utils.facet( [(i*s,j*s,0.1),((i+1)*s,j*s,0.1),((i+1)*s,(j+1)*s,0.1)],material=facetMat),
])
# Spheres
sphId=O.bodies.append([
utils.sphere( (0.5,0.5,0.2), 0.1, material=sphereMat),
])
O.bodies[sphId[-1]].state.vel=(0.5,0,0)
## Engines
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()]),
InteractionLoop(
[Ig2_Facet_Sphere_ScGeom()],
[Ip2_ViscElMat_ViscElMat_ViscElPhys()],
[Law2_ScGeom_ViscElPhys_Basic()],
),
GravityEngine(gravity=[0,0,-9.81]),
NewtonIntegrator(damping=0),
]
O.periodic=True
O.cell.refSize=(1,1,1)
O.dt=.01*tc
O.saveTmp()
|
Azeko2xo/woodem
|
scripts/test-OLD/facet-sphere-ViscElBasic-peri.py
|
facet-sphere-ViscElBasic-peri.py
|
py
| 1,352 |
python
|
en
|
code
| 2 |
github-code
|
6
|
70943332987
|
_base_ = [
'./uvtr_lidar_base.py'
]
point_cloud_range = [-54, -54, -5.0, 54, 54, 3.0]
pts_voxel_size = [0.075, 0.075, 0.2]
voxel_size = [0.15, 0.15, 8]
lidar_sweep_num = 10
# For nuScenes we usually do 10-class detection
class_names = [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
model = dict(
pts_voxel_layer=dict(
point_cloud_range=point_cloud_range,
voxel_size=pts_voxel_size,
),
pts_middle_encoder=dict(
sparse_shape=[41, 1440, 1440]),
pts_bbox_head=dict(
bbox_coder=dict(
pc_range=point_cloud_range,
voxel_size=voxel_size),
),
# model training and testing settings
train_cfg=dict(pts=dict(
grid_size=[720, 720, 1],
voxel_size=voxel_size,
point_cloud_range=point_cloud_range,
assigner=dict(
pc_range=point_cloud_range))),
)
data_root = 'data/nuscenes/'
file_client_args = dict(backend='disk')
db_sampler = dict(
type='UnifiedDataBaseSampler',
data_root=data_root,
info_path=data_root + 'nuscenes_unified_dbinfos_train.pkl', # please change to your own database file
rate=1.0,
prepare=dict(
filter_by_difficulty=[-1],
filter_by_min_points=dict(
car=5,
truck=5,
bus=5,
trailer=5,
construction_vehicle=5,
traffic_cone=5,
barrier=5,
motorcycle=5,
bicycle=5,
pedestrian=5)),
classes=class_names,
sample_groups=dict(
car=2,
truck=3,
construction_vehicle=7,
bus=4,
trailer=6,
barrier=2,
motorcycle=6,
bicycle=6,
pedestrian=2,
traffic_cone=2),
points_loader=dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=5,
use_dim=[0, 1, 2, 3, 4],
file_client_args=file_client_args))
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=5,
use_dim=5),
dict(
type='LoadPointsFromMultiSweeps',
sweeps_num=lidar_sweep_num-1,
use_dim=[0, 1, 2, 3, 4],
pad_empty_sweeps=True,
remove_close=True),
dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False),
dict(type='UnifiedObjectSample', db_sampler=db_sampler), # commit this for the last 2 epoch
dict(
type='UnifiedRotScaleTrans',
rot_range=[-0.3925, 0.3925],
scale_ratio_range=[0.95, 1.05]),
dict(
type='UnifiedRandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.5,
flip_ratio_bev_vertical=0.5),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectNameFilter', classes=class_names),
dict(type='PointShuffle'),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(type='CollectUnified3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'points'])
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=5,
use_dim=5),
dict(
type='LoadPointsFromMultiSweeps',
sweeps_num=lidar_sweep_num-1,
use_dim=[0, 1, 2, 3, 4],
pad_empty_sweeps=True,
remove_close=True),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(type='CollectUnified3D', keys=['points'])
# dict(
# type='MultiScaleFlipAug3D',
# img_scale=(1333, 800),
# pts_scale_ratio=1,
# # Add double-flip augmentation
# flip=True,
# pcd_horizontal_flip=True,
# pcd_vertical_flip=True,
# transforms=[
# dict(
# type='GlobalRotScaleTrans',
# rot_range=[0, 0],
# scale_ratio_range=[1., 1.],
# translation_std=[0, 0, 0]),
# dict(type='RandomFlip3D', sync_2d=False),
# dict(
# type='PointsRangeFilter', point_cloud_range=point_cloud_range),
# dict(
# type='DefaultFormatBundle3D',
# class_names=class_names,
# with_label=False),
# dict(type='Collect3D', keys=['points'])
# ])
]
optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.01)
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))
|
dvlab-research/UVTR
|
projects/configs/uvtr/lidar_based/uvtr_l_v0075_h5.py
|
uvtr_l_v0075_h5.py
|
py
| 4,629 |
python
|
en
|
code
| 199 |
github-code
|
6
|
35987738740
|
import torch
from tsf_baselines.modeling import build_network
ALGORITHMS = [
'BasicTransformerEncoderDecoder'
]
def get_algorithm_class(algorithm_name):
"""Return the algorithm class with the given name."""
if algorithm_name not in globals():
raise NotImplementedError("Algorithm not found: {}".format(algorithm_name))
print('algorithm_name = {}'.format(algorithm_name))
return globals()[algorithm_name]
def build_algorithm(cfg):
algorithm = get_algorithm_class(cfg.ALGORITHM.NAME)(cfg)
return algorithm
class Algorithm(torch.nn.Module):
"""
A subclass of Algorithm implements a time series forecasting algorithm.
Subclasses should implement the following:
- update()
- predict()
"""
def __init__(self, cfg):
super(Algorithm, self).__init__()
self.cfg = cfg
self.device = self._acquire_device()
def _acquire_device(self):
# print('self.cfg = {}'.format(self.cfg))
if self.cfg.MODEL.USE_GPU:
# os.environ["CUDA_VISIBLE_DEVICES"] = str(self.cfg.MODEL.DEVICE) if not self.args.use_multi_gpu else self.args.devices
device = torch.device('cuda:{}'.format(self.cfg.MODEL.DEVICE))
print('Use GPU: cuda:{}'.format(self.cfg.MODEL.DEVICE))
else:
device = torch.device('cpu')
print('Use CPU')
return device
def update(self, minibatches):
"""
Perform one update step, given a list of (x, y) tuples for all
environments.
"""
raise NotImplementedError
def predict(self, x):
raise NotImplementedError
class BasicTransformerEncDec(Algorithm):
def __init__(self, cfg):
super(BasicTransformerEncDec, self).__init__(cfg)
self.cfg = cfg
# Backbone
self.model = build_network(cfg)
# Loss function
self.loss_mse = torch.nn.MSELoss()
# Optimizer
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.cfg.SOLVER.BASE_LR)
# other declarations
pass
def _process_one_batch(self, dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark):
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float()
batch_x_mark = batch_x_mark.float().to(self.device)
batch_y_mark = batch_y_mark.float().to(self.device)
# decoder input
if self.cfg.DATASETS.PADDING == 0:
dec_inp = torch.zeros([batch_y.shape[0], self.cfg.MODEL.PRED_LEN, batch_y.shape[-1]]).float()
        elif self.cfg.DATASETS.PADDING == 1:
dec_inp = torch.ones([batch_y.shape[0], self.cfg.MODEL.PRED_LEN, batch_y.shape[-1]]).float()
dec_inp = torch.cat([batch_y[:, :self.cfg.MODEL.LABEL_LEN, :], dec_inp], dim=1).float().to(self.device)
# encoder - decoder
if self.cfg.MODEL.OUTPUT_ATTENTION:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
if self.cfg.DATASETS.INVERSE:
outputs = dataset_object.inverse_transform(outputs)
f_dim = -1 if self.cfg.DATASETS.FEATURES == 'MS' else 0
batch_y = batch_y[:, -self.cfg.MODEL.PRED_LEN:, f_dim:].to(self.device)
return outputs, batch_y
def update(self, dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark):
outputs, batch_y = self._process_one_batch(dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark)
loss = self.loss_mse(outputs, batch_y)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return {'loss': loss.item()}
def predict(self, dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark):
outputs, batch_y = self._process_one_batch(dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark)
return outputs, batch_y
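# Hypothetical training-loop sketch (the cfg, dataset and loader objects are
# assumptions; only build_algorithm() and the update() signature come from this file):
# algo = build_algorithm(cfg)
# for batch_x, batch_y, batch_x_mark, batch_y_mark in train_loader:
#     logs = algo.update(train_dataset, batch_x, batch_y, batch_x_mark, batch_y_mark)
#     print(logs['loss'])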
|
zhaoyang10/time-series-forecasting-baselines
|
tsf_baselines/algorithm/build.py
|
build.py
|
py
| 3,942 |
python
|
en
|
code
| 3 |
github-code
|
6
|
14712079581
|
from typing import List, Optional
from fastapi import APIRouter, Header
from fastapi.exceptions import HTTPException
from server.models.subscription import (
ExchangeKlineSubscriptionRequest,
ExchangeSubscription,
ExchangeSubscriptionType,
)
router = APIRouter()
@router.get("/")
async def list(x_connection_id: str = Header()) -> List[ExchangeSubscription]:
return await ExchangeSubscription.find(
ExchangeSubscription.type == ExchangeSubscriptionType.KLINE,
ExchangeSubscription.connection == x_connection_id,
).to_list()
@router.get("/{symbol:path}/interval/{interval:path}/")
async def retrieve(
symbol: str,
interval: str,
x_connection_id: str = Header(),
) -> Optional[ExchangeSubscription]:
try:
return await ExchangeSubscription.find_one(
ExchangeSubscription.type == ExchangeSubscriptionType.KLINE,
ExchangeSubscription.symbol == symbol,
ExchangeSubscription.interval == interval,
ExchangeSubscription.connection == x_connection_id,
)
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.get("/{symbol:path}/")
async def list_symbol(
symbol: str,
x_connection_id: str = Header(),
) -> List[ExchangeSubscription]:
try:
return await ExchangeSubscription.find(
ExchangeSubscription.type == ExchangeSubscriptionType.KLINE,
ExchangeSubscription.symbol == symbol,
ExchangeSubscription.connection == x_connection_id,
).to_list()
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.post("/")
async def create(
request: ExchangeKlineSubscriptionRequest, x_connection_id: str = Header()
) -> ExchangeSubscription:
try:
existing_subscription = await ExchangeSubscription.find_one(
ExchangeSubscription.type == ExchangeSubscriptionType.KLINE,
ExchangeSubscription.symbol == request.symbol,
ExchangeSubscription.interval == request.interval,
ExchangeSubscription.connection == x_connection_id,
)
if existing_subscription:
return existing_subscription
subscription = ExchangeSubscription(
type=ExchangeSubscriptionType.KLINE,
interval=request.interval,
connection=x_connection_id,
symbol=request.symbol,
)
return await subscription.create()
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.delete("/")
async def destroy(
request: ExchangeKlineSubscriptionRequest, x_connection_id: str = Header()
):
try:
item = await ExchangeSubscription.find_one(
ExchangeSubscription.type == ExchangeSubscriptionType.KLINE,
ExchangeSubscription.interval == request.interval,
ExchangeSubscription.connection == x_connection_id,
ExchangeSubscription.symbol == request.symbol,
)
if not item:
raise HTTPException(status_code=400, detail="subscription not found")
await ExchangeSubscription.delete(item)
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
|
masked-trader/raccoon-exchange-service
|
src/server/routes/subscription/kline.py
|
kline.py
|
py
| 3,263 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72784206907
|
# https://www.codewars.com/kata/5fc7d2d2682ff3000e1a3fbc
import re
def is_a_valid_message(message):
valid = all(int(n) == len(w) for n,w in re.findall(r'(\d+)([a-z]+)',message, flags = re.I))
# in the name of readability
if valid and re.match(r'((\d+)([a-z]+))*$',message, flags = re.I):
return True
else:
return False
# clever:
import re
def is_a_valid_message(message):
return all(n and int(n) == len(s) for n, s in re.findall("(\d*)(\D*)", message)[:-1])
# best practice wo re:
def is_a_valid_message(message):
size = 0
count = 0
for c in message:
if c.isnumeric():
if count == 0:
size = size * 10 + int(c)
else:
if count != size:
return False
count = 0
size = int(c)
else:
count = count + 1
return count == size
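# Hand-checked examples for the first implementation above:
# is_a_valid_message("3hey5hello2hi") -> True   (3 == len("hey"), 5 == len("hello"), 2 == len("hi"))
# is_a_valid_message("3hey5hello2h")  -> False  (2 != len("h"))
# is_a_valid_message("hello")         -> False  (no length prefix)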
|
blzzua/codewars
|
6-kyu/message_validator.py
|
message_validator.py
|
py
| 910 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14400394656
|
#!/usr/bin/env python3.8
def missing_element(arr1, arr2):
arr1.sort()
arr2.sort()
for num1, num2 in zip(arr1, arr2):
if num1 != num2:
return num1
return -1
def missing_element1(arr1, arr2):
count = {}
output = []
for i in arr1:
if i in count:
count[i] += 1
else:
count[i] = 1
for i in arr2:
if i in count:
count[i] -= 1
else:
count[i] = -1
for k in count:
if count[k] > 0:
output.append(k)
return output
import collections
def missing_element2(arr1, arr2):
count = collections.defaultdict(int)
output = []
for i in arr2:
count[i] += 1
for i in arr1:
if count[i] == 0:
output.append(i)
else:
count[i] -= 1
return output
def missing_element3(arr1, arr2):
return sum(arr1) - sum(arr2)
def missing_element4(arr1, arr2):
result = 0
for num in arr1+arr2:
result ^= num
return result
arr1 = [5,5,7,7]
arr2 = [5,7,7]
print(missing_element4(arr1,arr2))
print( ord("A")^ord("A"))
|
dnootana/Python
|
concepts/arrays/find_missing_element.py
|
find_missing_element.py
|
py
| 950 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9270774406
|
from typing import List  # for the List[...] annotations below


class Solution:
def smallestEqual(self, nums: List[int]) -> int:
output = []
for i in range(len(nums)):
if i % 10 == nums[i]:
output.append(i)
if len(output) > 0:
return min(output)
else:
return -1
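# Hand-checked examples:
# Solution().smallestEqual([0, 1, 2])    -> 0  (0 % 10 == nums[0])
# Solution().smallestEqual([4, 3, 2, 1]) -> 2  (2 % 10 == nums[2])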
|
nancyalaa/LeetCode
|
2057-smallest-index-with-equal-value/2057-smallest-index-with-equal-value.py
|
2057-smallest-index-with-equal-value.py
|
py
| 294 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10233608865
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING
from twitchio import User, PartialUser, Chatter, PartialChatter, Channel, Clip
from .errors import BadArgument
if TYPE_CHECKING:
from .core import Context
__all__ = (
"convert_Chatter",
"convert_Clip",
"convert_Channel",
"convert_PartialChatter",
"convert_PartialUser",
"convert_User",
)
async def convert_Chatter(ctx: Context, arg: str) -> Chatter:
"""
Converts the argument into a chatter in the chat. If the chatter is not found, BadArgument is raised.
"""
arg = arg.lstrip("@")
resp = [x for x in filter(lambda c: c.name == arg, ctx.chatters or tuple())]
if not resp:
raise BadArgument(f"The user '{arg}' was not found in {ctx.channel.name}'s chat.")
return resp[0]
async def convert_PartialChatter(ctx: Context, arg: str) -> PartialChatter:
"""
Converts the argument into a chatter in the chat. As opposed to Chatter converter, this will return a PartialChatter regardless of the cache state.
"""
return PartialChatter(ctx._ws, name=arg.lstrip("@"), channel=ctx.channel, message=None)
async def convert_Clip(ctx: Context, arg: str) -> Clip:
finder = re.search(r"(https://clips.twitch.tv/)?(?P<slug>.*)", arg)
if not finder:
raise RuntimeError(
"regex failed to match"
) # this should never ever raise, but its here to make type checkers happy
slug = finder.group("slug")
clips = await ctx.bot.fetch_clips([slug])
if not clips:
raise BadArgument(f"Clip '{slug}' was not found")
return clips[0]
async def convert_User(ctx: Context, arg: str) -> User:
"""
Similar to convert_Chatter, but fetches from the twitch API instead,
returning a :class:`twitchio.User` instead of a :class:`twitchio.Chatter`.
    To use this, you must have a valid client id and API token or client secret
"""
arg = arg.lstrip("@")
user = await ctx.bot.fetch_users(names=[arg])
if not user:
raise BadArgument(f"User '{arg}' was not found.")
return user[0]
async def convert_PartialUser(ctx: Context, arg: str) -> User:
"""
This is simply a shorthand to :ref:`~convert_User`, as fetching from the api will return a full user model
"""
return await convert_User(ctx, arg)
async def convert_Channel(ctx: Context, arg: str) -> Channel:
if arg not in ctx.bot._connection._cache:
raise BadArgument(f"Not connected to channel '{arg}'")
return ctx.bot.get_channel(arg)
_mapping = {
User: convert_User,
PartialUser: convert_PartialUser,
Channel: convert_Channel,
Chatter: convert_Chatter,
PartialChatter: convert_PartialChatter,
Clip: convert_Clip,
}
|
PythonistaGuild/TwitchIO
|
twitchio/ext/commands/builtin_converter.py
|
builtin_converter.py
|
py
| 2,755 |
python
|
en
|
code
| 714 |
github-code
|
6
|
18660136090
|
import os
import sys
try:
from dreamberd import interprete
except ModuleNotFoundError:
sys.exit("Use -m keyword.")
from argparse import ArgumentParser
parser = ArgumentParser(
prog="DreamBerd Interpreter (Python)",
description="The perfect programming language.",
)
parser.add_argument("content", help="The file or code to run.")
args = parser.parse_args()
if os.path.exists(args.content):
with open(args.content, "r", encoding="utf-8") as file:
content: str = file.read()
else:
content = args.content
interprete(content)
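# Typical invocation (a sketch; the path is an assumption):
#   python -m dreamberd path/to/program.db
#   python -m dreamberd '<dreamberd source code>'   # a non-path argument is run as code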
|
AWeirdScratcher/dreamberd-interpreter
|
dreamberd/__main__.py
|
__main__.py
|
py
| 559 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36776570545
|
from typing import List  # for the List[...] annotations below


class Solution:
def combination(self, visited, idx):
if sum(visited) == self.target:
self.ans.append(list(visited))
return
if sum(visited) > self.target:
return
for i in range(idx, len(self.candidates)):
visited.append(self.candidates[i])
self.combination(visited, i)
visited.pop()
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
self.ans = []
self.target = target
self.candidates = candidates
self.combination([], 0)
return self.ans
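# Hand-checked example:
# Solution().combinationSum([2, 3, 6, 7], 7) -> [[2, 2, 3], [7]]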
|
nathy-min/Competitive_Programming2
|
0039-combination-sum/0039-combination-sum.py
|
0039-combination-sum.py
|
py
| 640 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3574016217
|
"""Python module for common workflows and library methods.
Authors: Prasad Hegde
"""
import os
import json
import pathlib
import inspect
import random
import string
class Workflows():
"""
Common Workflows and library methods
"""
def get_config_data(self, test_method):
"""
        This routine returns the config data specific to the test case
:param test_method: Name of the test method
:return: tuple containing global_config and test_args
"""
path = pathlib.Path(inspect.getfile(self.__class__)).parent.absolute()
config_path = os.path.join(path, "config.json")
with open(config_path) as f_in:
config_data = json.load(f_in)
return config_data["global_config"], config_data["test_args"][self.__class__.__name__]\
[test_method]
@staticmethod
def generate_new_email(length=16, suffix=None):
"""
This routine generates a new email id
:param length: Length of the email(int)
:param suffix: domain(str)
:return: email id (str)
"""
retval = ''.join(random.choice(string.ascii_lowercase + string.digits) \
for i in range(length))
return retval + suffix if suffix else retval
@staticmethod
def verify_response_header(expected_header, actual_header):
"""
This routine is used to validate expected response header against actual
:param expected_header: dict
:param actual_header: dict
:return: Boolean
"""
if not any(item in actual_header.items() for item in expected_header.items()):
return False
return True
@staticmethod
def verify_response_time(expected_response_time, actual_response_time):
"""
This routine is used to verify response time of api call
:param actual_response_time: sec
:return: Boolean
"""
if actual_response_time <= expected_response_time:
return True
return False
@staticmethod
def update_user_details(test_args, **kwargs):
"""
This Routine is used to build user details
:param test_args: test args of the test method
:param kwargs: first_name, last_name, dob, image_url, email_id
:return: user data (dict)
"""
first_name = kwargs.get('first_name', test_args["updated_user_details"]["first_name"])
last_name = kwargs.get('last_name', test_args["updated_user_details"]["last_name"])
dob = kwargs.get('dob', test_args["updated_user_details"]["dob"])
image_url = kwargs.get('image_url', test_args["updated_user_details"]["image_url"])
email = kwargs.get('email_id', None)
user_data = {"first_name": first_name, "last_name": last_name, "date_of_birth": dob,
"image_url": image_url}
if email:
user_data["email"] = email
return user_data
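# Hypothetical usage sketch (argument values are assumptions):
# wf = Workflows()
# email = wf.generate_new_email(length=8, suffix="@example.com")
# wf.verify_response_time(2.0, 1.4)   # -> True, 1.4s is within the 2.0s budget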
|
prasadhegde60/showoff.ie
|
workflows/workflows.py
|
workflows.py
|
py
| 3,021 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4432774556
|
import random
trials = 100000
budget = 1000
bet = 100
goal = 2 * budget
probability = 18/37
def gamblers_ruin(budget, bet, goal, probability):
current_budget = budget
num_bets = 0
while current_budget > 0 and current_budget < goal:
num_bets += 1
if random.random() < probability:
current_budget += bet
else:
current_budget -= bet
return (num_bets, current_budget)
results = [gamblers_ruin(budget, bet, goal, probability) for _ in range(trials)]
def probability_of_goal(results):
return sum([result[1] > 0 for result in results]) / len(results)
def expected_profit(results):
return sum([result[1] for result in results]) / len(results)
def extreme_runs(results):
bet_counts = [result[0] for result in results]
return (min(bet_counts), max(bet_counts))
print(probability_of_goal(results))
print(expected_profit(results))
print(probability_of_goal(results) * goal)
import numpy as np
def bet_percentiles(results, percentile):
bet_counts = [result[0] for result in results]
return np.percentile(bet_counts, percentile)
print(extreme_runs(results))
print(bet_percentiles(results, 95))
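# Closed-form cross-check (standard gambler's ruin result, stated as a sketch):
# with win probability p != 1/2, q = 1 - p, a starting bankroll of i bets and a
# goal of N bets, P(reach goal before ruin) = (1 - (q/p)**i) / (1 - (q/p)**N).
# Here i = budget // bet = 10 and N = goal // bet = 20, so probability_of_goal(results)
# should land close to that value.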
|
ander428/Computational-Economics-MGSC-532
|
In Class Code/GamblersRuin.py
|
GamblersRuin.py
|
py
| 1,224 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2018211678
|
#!/usr/bin/env python
import os
from applicake.app import WrappedApp
from applicake.apputils import validation
from applicake.coreutils.arguments import Argument
from applicake.coreutils.keys import Keys, KeyHelp
class Dss(WrappedApp):
"""
    The DSS is often an initial workflow node. Requesting a workdir thus has the nice
    side effect that the DSS generates the JOB_ID for the workflow.
"""
ALLOWED_PREFIXES = ['getdataset', 'getmsdata', 'getexperiment']
TRUES = ['TRUE', 'T', 'YES', 'Y', '1']
def add_args(self):
return [
Argument(Keys.WORKDIR, KeyHelp.WORKDIR),
Argument(Keys.EXECUTABLE, "%s %s" % (KeyHelp.EXECUTABLE, self.ALLOWED_PREFIXES)),
Argument(Keys.DATASET_CODE, 'dataset code to get for getdataset or getmsdata'),
Argument('EXPERIMENT', 'experiment code to get for for getexperiment'),
Argument('DATASET_DIR', 'cache directory'),
Argument('DSS_KEEP_NAME', "for 'getmsdata' only: output keeps original file name if set to true "
"(otherwise it will be changed to samplecode~dscode.mzXXML)",
default='false')
]
def prepare_run(self, log, info):
executable = info[Keys.EXECUTABLE]
if not executable in self.ALLOWED_PREFIXES:
raise Exception("Executable %s must be one of [%s]" % (executable, self.ALLOWED_PREFIXES))
self.rfile = os.path.join(info[Keys.WORKDIR], executable + ".out")
outdir = info['DATASET_DIR']
if executable == 'getmsdata' and not info['DSS_KEEP_NAME'].upper() == 'TRUE':
koption = '-c'
else:
koption = ''
if info[Keys.EXECUTABLE] == 'getexperiment':
dscode_to_get = info['EXPERIMENT']
else:
dscode_to_get = info[Keys.DATASET_CODE]
command = "%s -v -r %s --out=%s %s %s" % (executable, self.rfile, outdir, koption, dscode_to_get)
return info, command
def validate_run(self, log, info, exit_code, out):
if "TypeError: expected str or unicode but got <type 'NoneType'>" in out:
raise RuntimeError("Dataset is archived. Please unarchive first!")
validation.check_exitcode(log, exit_code)
#KEY where to store downloaded file paths
default_keys = {'getmsdata': 'MZXML', 'getexperiment': 'SEARCH', 'getdataset': 'DSSOUT'}
key = default_keys[info[Keys.EXECUTABLE]]
#VALUE is a list of files or the mzXMLlink
dsfls = []
with open(self.rfile) as f:
for downloaded in [line.strip() for line in f.readlines()]:
ds, fl = downloaded.split("\t")
if ds == info[Keys.DATASET_CODE] or ds == info['EXPERIMENT']:
dsfls.append(fl)
#MZXML is expected only 1
if key == 'MZXML':
dsfls = dsfls[0]
log.debug("Adding %s to %s" % (dsfls, key))
info[key] = dsfls
return info
if __name__ == "__main__":
Dss.main()
|
lcb/applicake
|
appliapps/openbis/dss.py
|
dss.py
|
py
| 3,044 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73652373309
|
# Write an algorithm to determine whether a number n is a "happy number".
# A happy number is defined as follows:
# Starting from a positive integer, repeatedly replace the number with the sum of
# the squares of its digits.
# Repeat this process until the number equals 1, or it loops forever without reaching 1.
# If the process ends in 1, the number is a happy number.
class Solution(object):
def isHappy(self, n):
"""
:type n: int
:rtype: bool
"""
visited = {n}
while (True):
next = 0
while (n != 0):
next += (n % 10) ** 2
n = n // 10
if next == 1:
return True
if next in visited:
return False
n = next
visited.add(next)
n = 2
a = Solution()
print(a.isHappy(n))
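# Worked example: n = 19 is happy because
# 1^2 + 9^2 = 82 -> 8^2 + 2^2 = 68 -> 6^2 + 8^2 = 100 -> 1^2 + 0^2 + 0^2 = 1,
# while n = 2 (tested above) falls into a cycle and prints False.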
|
xxxxlc/leetcode
|
array/isHappy.py
|
isHappy.py
|
py
| 860 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
372063532
|
#!/usr/bin/env python3
import socketserver, socket, threading
upload = {}
download = {}
threadList = []
terminate = False
def shutdownServer():
global server
server.shutdown()
def handlethread(socketup, socketdown):
data = socketup.recv(512)
while data:
socketdown.send(data)
data = socketup.recv(512)
socketdown.close()
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
global download, upload, terminate, threadList, cv, cvHandle, handlethread, shutdownServer
# self.request is the TCP socket connected to the client
command = self.request.recv(1)
if(command.decode("utf-8") == "F"):
terminate = True
for i in range(len(threadList)):
threadList[i].join()
with cvHandle:
cvHandle.notify_all()
thread = threading.Thread(target = shutdownServer)
thread.start()
thread.join()
with cv:
cv.notify_all()
return
# thread.join()
#upload
elif command.decode("utf-8") == "P" and not terminate:
key = (self.request.recv(8)).decode('utf-8')
if key not in upload:
upload[key] = [self.request]
else:
upload[key].append(self.request)
if key not in download:
with cv:
while key not in download:
cv.wait()
if terminate:
return
socketup = upload[key].pop()
socketdown = download[key].pop()
if len(upload[key]) == 0:
del upload[key]
if len(download[key]) == 0:
del download[key]
thread = threading.Thread(target = handlethread, args = (socketup, socketdown, ))
threadList.append(thread)
thread.start()
else:
with cv:
cv.notify_all()
#download
elif command.decode("utf-8") == "G" and not terminate:
key = (self.request.recv(8)).decode('utf-8')
if key not in download:
download[key] = [self.request]
else:
download[key].append(self.request)
if key not in upload:
with cv:
while key not in upload:
cv.wait()
if terminate:
return
socketup = upload[key].pop()
socketdown = download[key].pop()
if len(upload[key]) == 0:
del upload[key]
if len(download[key]) == 0:
del download[key]
thread = threading.Thread(target = handlethread, args = (socketup, socketdown, ))
threadList.append(thread)
thread.start()
else:
with cv:
cv.notify_all()
cvHandle.acquire()
cvHandle.wait()
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
HOST, PORT = "", 0
# Create the server, binding to localhost on port 9999
cv = threading.Condition()
cvHandle = threading.Condition()
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
print(server.server_address[1])
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
|
rainagan/cs456
|
a1/server.py
|
server.py
|
py
| 3,642 |
python
|
en
|
code
| 1 |
github-code
|
6
|
4755956537
|
import argparse
import os
import sys
import time
import json
import pickle
from nltk.corpus import wordnet as wn
import numpy as np
import torch
import random
from aligner import Aligner
import log
logger = log.get_logger('root')
logger.propagate = False
def get_print_result(sample_group: dict, sample_result: dict, nonce_word):
candidates = sample_group['candidates']
info = sample_group['common_ancestor_info']
print_str = "\n===============================================================================================\n"
print_str += f'Number of Candidates: {len(candidates)}\n\n'
print_str += f"\nCommon {info['relation']} synset: {info['ancestor_name']}\n{wn.synset(info['ancestor_name']).definition()}\n\n"
# Use the line below for CoDA5
# print_str += f"\nCommon {info['relation']} synset: {info['ancestor_synset']}\n{info['ancestor_definition']}\n\n"
for candidate_no, candidate in enumerate(candidates):
print_str += f"\n{candidate_no+1}) Synset: {candidate['synset_name']} ({candidate['words_in_contexts'][0]})\n"
print_str += f"Definition: {candidate['definition']}\n"
print_str += f"Context: {candidate['contexts'][0]}\n"
print_str += "\n\n"
print_str += f"Predicted alignment: {sample_result['predicted_alignment']}\n"
print_str += f"Alignment Score: {sample_result['alignment_score']}\n"
return print_str
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# file parameters
parser.add_argument('--output_folder', default=None, type=str, required=True)
parser.add_argument('--data_file', default=None, type=str, required=True)
# parameters for the model to generate definitions
parser.add_argument('--model_cls', choices=['bert','roberta','gpt-2'], default='gpt-2')
parser.add_argument('--model_name', type=str, default='gpt2-medium')
parser.add_argument('--word_type', choices=['n','v'], default='n')
parser.add_argument('--nonce_word', type=str, default='bkatuhla')
parser.add_argument('--max_def_len', type=int, default=48,
help='maximum definition length after tokenization')
parser.add_argument('--max_batch_size', type=int, default=48,
help='maximum batch size')
parser.add_argument('--gpu_id', type=int, default=0,
help='id of the gpu that will be used during evaluations')
parser.add_argument('--seed', type=int, default=42,
help='seed for selecting random train samples for one of few shot evaluation')
args = parser.parse_args()
random.seed(args.seed)
with open(args.data_file, 'r') as handle:
CoDA = json.load(handle)
data = args.data_file.split("/")[-1][:-5] # don't take .json
print_file = f'{args.model_name}_on_{data}_{args.word_type}_nonce_{args.nonce_word}_some_results.txt'
save_file = f'{args.model_name}_on_{data}_{args.word_type}_nonce_{args.nonce_word}_results.pickle'
if not os.path.exists(args.output_folder):
os.mkdir(args.output_folder)
f_out = open(os.path.join(args.output_folder, print_file), "w", encoding='UTF-8')
f_out.close()
aligner = Aligner(
model_cls=args.model_cls,
pretrained_model=args.model_name,
gpu_id=args.gpu_id,
max_def_len=args.max_def_len,
max_batch_size=args.max_batch_size,
nonce_word=args.nonce_word,
word_type=args.word_type
)
sample_result = {}
sample_result[''] = []
sample_result['target_scores'] = []
sample_result['predicted_alignment'] = []
sample_result['alignment_score'] = []
all_results = []
sample_groups = CoDA[args.word_type]
for group_no, sample_group in enumerate(sample_groups):
target_scores, predicted_alignment, aligment_score = aligner.align(sample_group)
sample_result = {}
sample_result['target_scores'] = target_scores
sample_result['predicted_alignment'] = predicted_alignment
sample_result['alignment_score'] = aligment_score
all_results.append(sample_result)
if (group_no+1) % 25 == 0:
logger.info(f'{group_no+1}/{len(sample_groups)} synset groups processed')
if (group_no+1) % (len(sample_groups) // 20) == 0:
with open(os.path.join(args.output_folder, print_file), "a", encoding='UTF-8') as f_out:
f_out.write(get_print_result(sample_group, sample_result, args.nonce_word))
with open(os.path.join(args.output_folder, save_file), "wb") as handle:
pickle.dump(all_results, handle)
with open(os.path.join(args.output_folder, save_file), "wb") as handle:
pickle.dump(all_results, handle)
|
lksenel/CoDA21
|
Evaluation/evaluate_PLMs.py
|
evaluate_PLMs.py
|
py
| 4,847 |
python
|
en
|
code
| 2 |
github-code
|
6
|
27094902824
|
from typing import Any, Callable, Dict
from torchvision import transforms as T
from rikai.types.vision import Image
"""
Adapted from https://github.com/pytorch/pytorch.github.io/blob/site/assets/hub/pytorch_vision_resnet.ipynb
""" # noqa E501
def pre_processing(options: Dict[str, Any]) -> Callable:
"""
All pre-trained models expect input images normalized in the same way, i.e.
mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W
are expected to be at least 224. The images have to be loaded in to a range
of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std =
[0.229, 0.224, 0.225].
"""
return T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
def post_processing(options: Dict[str, Any]) -> Callable:
def post_process_func(batch):
results = []
for result in batch:
results.append(result.detach().cpu().tolist())
return results
return post_process_func
OUTPUT_SCHEMA = "array<float>"
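# Minimal preprocessing sketch (assumes a PIL-compatible image `pil_img`):
# tfm = pre_processing({})
# batch = tfm(pil_img).unsqueeze(0)   # tensor of shape (1, 3, 224, 224)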
|
World-shi/rikai
|
python/rikai/contrib/torchhub/pytorch/vision/resnet.py
|
resnet.py
|
py
| 1,166 |
python
|
en
|
code
| null |
github-code
|
6
|
27453799884
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def averageOfLevels(self, root):
"""
:type root: TreeNode
:rtype: List[float]
"""
queue = [root]
res = []
while queue:
res.append(sum([node.val for node in queue]) * 1.0 / len(queue))
temp = []
for node in queue:
if node.left:
temp.append(node.left)
if node.right:
temp.append(node.right)
queue = temp
return res
# T:O(n) - # of nodes in the tree
# S: O(m) - max num of nodes in one level
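# Hand-checked example: the tree [3, 9, 20, null, null, 15, 7] has level sums
# 3, 9 + 20 and 15 + 7, so the method returns [3.0, 14.5, 11.0].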
|
dreamebear/Coding-interviews
|
BFS/LC637-Avg-Level-Binary-Tree/LC637.py
|
LC637.py
|
py
| 794 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23515271400
|
# First Solution: 58476KB / 200ms / 674B
def BS(array,start,end):
while start<=end:
mid = (start+end)//2
if array[mid][1] == 1 and array[mid-1][1]==2: return mid
elif array[mid][1] == 2: start = mid+1
else: end = mid-1
return None
def Solution(data):
data = sorted(data.items(), key=lambda x:(-x[1],x[0]))
midpoint = BS(data,0,N+M-1)
if midpoint == None:
print(0)
else:
print(midpoint)
stdout.write('\n'.join(map(str,dict(data[:midpoint]).keys())))
from sys import stdin,stdout
from collections import Counter
N, M = map(int, stdin.readline().split())
data = Counter([stdin.readline().rstrip() for _ in range(N+M)])
Solution(data)
# ---------------------------------------------------------
# More Advanced Solution: 41884KB / 124ms / 272B
from sys import stdin,stdout
N, M = map(int, stdin.readline().split())
hear = set([stdin.readline().rstrip() for _ in range(N)])
see = set([stdin.readline().rstrip() for _ in range(M)])
common = sorted(list(hear & see))
print(len(common))
stdout.write('\n'.join(common))
|
Soohee410/Algorithm-in-Python
|
BOJ/Silver/1764.py
|
1764.py
|
py
| 1,094 |
python
|
en
|
code
| 6 |
github-code
|
6
|
17433175980
|
import os
import sys
import unittest
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from chvote.Utils.Utils import AssertList
def CheckVerificationCodes(rc_bold, rc_prime_bold, s_bold):
"""
Algorithm 7.29: Checks if every displayed verification code RC'_i i matches with the return code
RC_s_i of the selected candidate s_i as printed on the code sheet.
Note that this algorithm is executed by humans.
Args:
rc_bold (list): Printed return codes
rc_prime_bold (list): Displayed return codes rc'
s_bold (list): Selections
Returns:
bool
"""
AssertList(rc_bold)
AssertList(rc_prime_bold)
AssertList(s_bold)
for i in range(len(s_bold)):
if rc_bold[s_bold[i]] != rc_prime_bold[i]:
return False
return True
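# Illustrative check (codes and selections are made-up values):
# rc_bold = ["A1", "B2", "C3"]   # printed codes, indexed by candidate
# s_bold = [0, 2]                # selected candidates
# CheckVerificationCodes(rc_bold, ["A1", "C3"], s_bold) -> True
# CheckVerificationCodes(rc_bold, ["A1", "B2"], s_bold) -> False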
class CheckVerificationCodesTest(unittest.TestCase):
def testCheckVerificationCodes(self):
self.assertTrue(False)
if __name__ == '__main__':
unittest.main()
|
nextgenevoting/visualizer
|
backend/chvote/VotingClient/CheckVerificationCodes.py
|
CheckVerificationCodes.py
|
py
| 1,079 |
python
|
en
|
code
| 3 |
github-code
|
6
|
25760579432
|
from opencage.geocoder import OpenCageGeocode
import xlrd
import xlwt
from xlwt import Workbook
import pandas as pd
key ="fd4f682cf2014f3fbd321ab141454138"
# get api key from: https://opencagedata.com
geocoder = OpenCageGeocode(key)
loc = ("/Users/ashwinisriram/Documents/Lat long/corrected.xlsx")
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)
sheet.cell_value(0, 0)
# Workbook is created
wb = Workbook()
# add_sheet is used to create sheet.
sheet1 = wb.add_sheet('Sheet 1')
# Define a dictionary containing data
data={'Customer_code':[],'City':[],'State':[]}
branch_district = ""
for r in range(4000,4500):
customer_code=str(sheet.cell_value(r, 0))
# sheet1.write(i, 1, sheet.cell_value(r, 1))
# sheet1.write(i, 2, sheet.cell_value(r, 2))
branch = str(sheet.cell_value(r, 1))
district= str(sheet.cell_value(r, 2))
data['Customer_code'].append(customer_code)
data['City'].append(branch)
data['State'].append(district)
df=pd.DataFrame(data)
# Convert the dictionary into DataFrame
# Observe the result
print(df)
list_lat = [] # create empty lists
list_long = []
link=[]
for index, row in df.iterrows(): # iterate over rows in dataframe
City = row['City']
State = row['State']
query = str(City)+','+str(State)
results = geocoder.geocode(query)
try:
lat = results[0]['geometry']['lat']
long = results[0]['geometry']['lng']
list_lat.append(lat)
list_long.append(long)
link.append("http://www.google.com/maps/place/"+str(lat)+','+str(long))
except IndexError:
list_lat.append('Nan')
list_long.append('Nan')
link.append("link unavailable")
# create new columns from lists
df['lat'] = list_lat
df['lon'] = list_long
df['link']=link
# function to find the coordinate
# of a given city
print(df)
# create excel writer object
writer = pd.ExcelWriter('output2.xlsx')
# write dataframe to excel
df.to_excel(writer,'sheet2')
# save the excel
writer.save()
print('DataFrame is written successfully to Excel File.')
|
Ashwini-Sriram/Latlong
|
alter.py
|
alter.py
|
py
| 2,116 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38703775254
|
import json
def getAdminAccount():
with open("./Data/admins.json", "r") as file:
JSON = file.read()
accounts = json.loads(JSON)
return accounts
def getAccount():
with open("./Data/accounts.json", "r") as file:
JSON = file.read()
accounts = json.loads(JSON)
return accounts
|
Coincoin008/DrawPlz-localhost-version-
|
getAccounts.py
|
getAccounts.py
|
py
| 333 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29528497286
|
#!/usr/bin/python3
import html
import re
import random
import json
import requests
from bs4 import BeautifulSoup
PATTERN = re.compile(r'/video(\d+)/.*')
def _fetch_page(page_number):
url = 'https://www.xvideos.com/porn/portugues/' + str(page_number)
res = requests.get(url)
if res.status_code != 200:
raise Exception('Response Error: ' + str(res.status_code))
return BeautifulSoup(res.text, 'html.parser')
def _find_videos(soup):
for element in soup.select('.thumb-block > .thumb-under > p > a'):
try:
reference = PATTERN.match(element['href']).group(1)
except AttributeError:
pass
yield element['title'], reference, element['href']
def _get_comments(video_ref):
url_mask = 'https://www.xvideos.com/threads/video-comments/get-posts/top/{0}/0/0'
url = url_mask.format(video_ref)
res = requests.post(url)
if res.status_code != 200:
raise Exception('Response Error: ' + str(res.status_code))
json_obj = json.loads(res.text)['posts']
json_obj = json_obj['posts']
try:
for attr, val in json_obj.items():
content = html.unescape(val['message'])
author = html.unescape(val['name'])
if '<a href=' not in content:
yield author, content
except (AttributeError, IndexError) as e:
raise IndexError
def choose_random_porn_comment():
for _ in range(10):
page = _fetch_page(random.randint(1, 40))
videos = _find_videos(page)
try:
title, reference, url = random.choice(list(videos))
comments = _get_comments(reference)
author, content = random.choice(list(comments))
except IndexError:
continue
return author, content, title, url
raise Exception('Too hard')
def _fetch_tag_page(page_number, tag):
if tag is not None:
url = 'https://www.xvideos.com/?k='+ str(tag) +'&p=' + str(page_number)
else:
url = 'https://www.xvideos.com/new/' + str(page_number)
res = requests.get(url)
if res.status_code != 200:
raise Exception('Response Error: ' + str(res.status_code))
return BeautifulSoup(res.text, 'html.parser')
def choose_random_video(tag=None):
for _ in range(10):
page = _fetch_tag_page(random.randint(1, 4), tag)
videos = _find_videos(page)
try:
title, reference, url = random.choice(list(videos))
url = 'https://xvideos.com'+url
return url
except IndexError:
raise Exception('Response Error: Bad search term')
raise Exception('Too hard')
def main():
# comment = choose_random_porn_comment()
# print(*comment, sep='\n')
video = choose_random_video()
print(video, sep='\n')
if __name__ == '__main__':
main()
|
marquesgabriel/bot-xvideos-telegram
|
xvideos.py
|
xvideos.py
|
py
| 2,846 |
python
|
en
|
code
| 2 |
github-code
|
6
|
41589531993
|
L = [("Rimm",100), ("FengFeng",95), ("Lisi", 87), ("Ubuntu", 111)]
def by_name(n):
x = sorted(n[0], key=str.lower)
return x
out = sorted(L, key=by_name)
print(out)
def by_score(n):
x = sorted(range(n[1]), key=abs)
return x
o = sorted(L, key=by_score, reverse=True)
print(o)
|
Wainemo/PythonPractice
|
tuple表示学生名和成绩 用sorted排序.py
|
tuple表示学生名和成绩 用sorted排序.py
|
py
| 295 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29608054037
|
''' NEURAL NETWORK FOR DIGIT DETECTION
This program is a shallow Neural Network that is trained to recognize digits written in a 5x3 box
'''
import random
import math
import csv
# Hyperparameters:
# speed (magnitude) at which algorithm adjusts weights
LEARNING_RATE = 0.3
# Feature –> individual and independent variables that measure a property or characteristic
# AKA input size (input is 3x5 box thus input size is 15)
FEATURE_SIZE = 15
# number of nodes in hidden layer
HIDDEN_SIZE = 12
# Class –> output variables in a classification model are referred to as classes (or labels)
# AKA output size (number of digits from 0-9 is 10 thus output size is 10)
CLASS_SIZE = 10
# range of randomly initialized weight values, centered around 0
INITIALIZATION_RANGE = 0.4
# number of training iterations (epochs) over the whole training set
NUM_ITERATIONS = 1000
# initializing weights from the trainingData and Validation .csv files using the csv library
trainingDataPath = 'NumberClassifierNN/TrainingData.csv'
with open(trainingDataPath, newline='') as f:
reader = csv.reader(f)
trainingData = [tuple(row) for row in reader]
validationDatapath = 'NumberClassifierNN/ValidationData.csv'
with open(validationDatapath, newline='') as f:
reader = csv.reader(f)
validationData = [tuple(row) for row in reader]
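# Each row read from the CSVs becomes a (features, label) tuple: item 0 is a 15-character
# string of 0/1 pixel values for the 5x3 box, item 1 is a 10-character one-hot string
# marking the digit (this is how setInputNodes and outputLayerError below consume it).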
# fill weights with random numbers from -0.2 to 0.2
def initializeWeights(IKweights, KHweights):
for i in range(FEATURE_SIZE):
currentNodeWeights = []
for j in range(HIDDEN_SIZE):
currentNodeWeights.append(
random.random()*INITIALIZATION_RANGE - INITIALIZATION_RANGE/2)
IKweights.append(currentNodeWeights)
for i in range(HIDDEN_SIZE):
currentNodeWeights = []
for j in range(CLASS_SIZE):
currentNodeWeights.append(
random.random()*INITIALIZATION_RANGE - INITIALIZATION_RANGE/2)
KHweights.append(currentNodeWeights)
return IKweights, KHweights
# set the input nodes for a given training input
def setInputNodes(trainingExample):
inputNodes = []
currentExample = trainingExample[0]
for i in range(FEATURE_SIZE):
inputNodes.append(currentExample[i])
return inputNodes
# getting values of all nodes in j layer using the sum of previous i layer and weights from i to j
# xⱼ = ∑(xᵢ * wᵢ)
# actual operation is an implementation of dot product of matrices
def sumWeights(prevLayer, weights, currentLayerSize):
currentLayer = []
for i in range(currentLayerSize):
sum = 0
for j in range(len(prevLayer)):
sum += float(prevLayer[j])*weights[j][i]
currentLayer.append(sum)
return currentLayer
# sigmoid activation function that "squishes" the node values to be between 0 and 1
# used to allow slow and steady learning
def sigmoidFunction(nodes):
for i in range(len(nodes)):
power = pow(math.e, -1*nodes[i])
nodes[i] = 1/(1+power)
return nodes
# main forward propogation function
def forwardPropogation(currentExample, inputWeights, outputWeights):
inputNodes = setInputNodes(currentExample)
hiddenNodes = sumWeights(inputNodes, inputWeights, HIDDEN_SIZE)
hiddenNodes = sigmoidFunction(hiddenNodes)
outputNodes = sumWeights(hiddenNodes, outputWeights, CLASS_SIZE)
outputNodes = sigmoidFunction(outputNodes)
return inputNodes, outputNodes, hiddenNodes
# find the error for each output node: σ(h)
def outputLayerError(trainingExample, outputNodes):
error = []
for i in range(len(outputNodes)):
expectedOutputString = trainingExample[1]
expectedOutput = int(expectedOutputString[i])
actualOutput = outputNodes[i]
error.append(actualOutput * (1 - actualOutput)
* (expectedOutput - actualOutput))
return error
# find the error for each hidden node: σ(j)
def hiddenLayerError(hiddenNodes, outputNodes, outputWeights, outputError):
error = []
for i in range(len(hiddenNodes)):
alpha = 0
# get the value of alpha (calculated using all the output nodes that are connected from hidden node k: ⍺ = Σ(outputWeights * outputError))
for j in range(len(outputNodes)):
alpha += outputWeights[i][j]*outputError[j]
actualOutput = hiddenNodes[i]
error.append(actualOutput * (1 - actualOutput) * alpha)
return error
# adjust each weight between i and j layers using learning rate, error from j layer, and node value from i layer
def adjustWeights(learningRate, error, weights, prevNode):
for i in range(len(weights)):
for j in range(len(weights[i])):
deltaWeight = learningRate * error[j] * float(prevNode[i])
weights[i][j] = weights[i][j] + deltaWeight
return weights
# main backwards propogation function
def backwardsPropogation(currentExample, inputNodes, output, outputWeights, hiddenNodes, hiddenWeights):
    # calculate the error of the final, output layer using the expected value
outputError = outputLayerError(currentExample, output)
# adjust weights from hidden layer to output layer using output layer error and values from hidden layer nodes
outputWeights = adjustWeights(
LEARNING_RATE, outputError, outputWeights, hiddenNodes)
# calculate error of hidden layer using output error
hiddenError = hiddenLayerError(
hiddenNodes, output, outputWeights, outputError)
    # adjust weights from input layer to hidden layer using hidden layer error and values from input layer nodes
hiddenWeights = adjustWeights(
LEARNING_RATE, hiddenError, hiddenWeights, inputNodes)
return hiddenWeights, outputWeights
def main():
# 1d arrays that hold the value of the nodes
hiddenNodes = []
outputNodes = []
# Input is layer I, hidden layer is layer K, output layer is layer H
# Each weights list is a 2D array with Row = stem node, Column = outgoing weight
IKweights = []
KHweights = []
# initialize weights with random numbers
IKweights, KHweights = initializeWeights(IKweights, KHweights)
inputWeights = IKweights
outputWeights = KHweights
'''
#TESTING FOR LOOP
for i in range(10):
#print("current example is " + str(currentExample))
inputNodes, outputNodes, hiddenNodes = forwardPropogation(trainingData[0], inputWeights, outputWeights)
inputWeights, outputWeights = backwardsPropogation(trainingData[0], inputNodes, outputNodes, outputWeights, hiddenNodes, inputWeights)
#print("current example is " + str(currentExample))
inputNodes, outputNodes, hiddenNodes = forwardPropogation(trainingData[0], inputWeights, outputWeights)
print("output for example " + str(0))
print(outputNodes)
'''
# main function that forward propogates and backward propogates for a set number of iterations
for i in range(NUM_ITERATIONS):
#shuffle the list to add randomization and prevent overfitting
random.shuffle(trainingData)
#train the model on each training example in the training set
for j in range(len(trainingData)):
exampleSelection = j
currentExample = trainingData[exampleSelection]
            # forward propagate through the neural network using the input and weights to receive an output
inputNodes, outputNodes, hiddenNodes = forwardPropogation(
currentExample, inputWeights, outputWeights)
            # backward propagate through the data, finding the error and adjusting weights accordingly
inputWeights, outputWeights = backwardsPropogation(
currentExample, inputNodes, outputNodes, outputWeights, hiddenNodes, inputWeights)
showResults(validationData, inputWeights, outputWeights)
# function that returns the output of the NN and the expected output from the validation data
def returnResults(actualOutput, expectedOutput, example):
actualDigit = 0
expectedDigit = 0
confidence = 0
correctGuess = False
# Find the algorithm's predicted digit by finding the largest value in the output node
for i in range(len(actualOutput)):
if (actualOutput[i] > actualOutput[actualDigit]):
actualDigit = i
confidence = actualOutput[actualDigit] * 100
confidence = round(confidence, 2)
# Find the expected output by finding which output is 1
for i in range(len(expectedOutput[example][1])):
if (expectedOutput[example][1][i:i+1] == "1"):
expectedDigit = i
if (actualDigit == expectedDigit):
correctGuess = True
return actualDigit, expectedDigit, confidence, correctGuess
def showResults(validationData, inputWeights, outputWeights):
accuracy = 0
for i in range(len(validationData)):
        correctnessString = 'incorrectly'
inputNodes, outputNodes, hiddenNodes = forwardPropogation(
validationData[i], inputWeights, outputWeights)
actualOutput, expectedOutput, confidence, correctness = returnResults(
outputNodes, validationData, i)
if (correctness == True):
accuracy += 1
correctnessString = 'correctly'
print(str(i) + ": model " + correctnessString + " predicted " + str(actualOutput) + " with a confidence of " + str(confidence)
+ "% actual digit was " + str(expectedOutput))
print("accuracy for the model was " +
str(accuracy/len(validationData)*100) + "%")
if __name__ == "__main__":
main()
|
DinglyCoder/Neural_Network_Digit_Classifier
|
Digit_NN.py
|
Digit_NN.py
|
py
| 9,468 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33828906255
|
import subprocess
import os
from concurrent.futures import ThreadPoolExecutor
spiders = subprocess.run(["scrapy", "list"], stdout=subprocess.PIPE, text=True).stdout.strip().split('\n')
def run_spider(spider_name):
log_file = f"logs/{spider_name}_logs.txt"
os.makedirs(os.path.dirname(log_file), exist_ok=True)
with open(log_file, 'w') as file:
file.truncate()
subprocess.run(
["scrapy", "crawl", spider_name],
check=True,
stdout=file,
stderr=subprocess.STDOUT
)
print(f"{spider_name} completed and logged to {log_file}")
for spider in spiders:
run_spider(spider)
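# ThreadPoolExecutor is imported above but not used; a minimal sketch of running the
# spiders concurrently instead of the sequential loop (the worker count is an assumption,
# and it is left commented out so the spiders are not run twice):
# with ThreadPoolExecutor(max_workers=4) as pool:
#     list(pool.map(run_spider, spiders))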
|
hassaan-ahmed-brainx/enpak_scrappers
|
run_spiders.py
|
run_spiders.py
|
py
| 615 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71362184827
|
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'PythonPDB'
copyright = '2023, Benjamin McMaster'
author = 'Benjamin McMaster'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
|
benjiemc/PythonPDB
|
docs/conf.py
|
conf.py
|
py
| 864 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5308859090
|
# expected rank A, actual rank B, dissatisfaction = |A-B|
# minimize the total dissatisfaction -> assign ranks to the students
# e.g. expected ranks 1 5 3 1 2 -> sorted 1 1 2 3 5
#      assigned ranks           ->        1 2 3 4 5
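# pairing them up gives |1-1| + |1-2| + |2-3| + |3-4| + |5-5| = 3 as the total dissatisfaction for this example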
n = int(input())
guess_rank = [int(input()) for _ in range(n)]
rank = [i for i in range(1, n+1)]
worst_score = 0
for a, b in zip(rank, sorted(guess_rank)):
worst_score += abs(a - b)
print(worst_score)
|
louisuss/Algorithms-Code-Upload
|
Python/FastCampus/greedy/2012.py
|
2012.py
|
py
| 352 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71455581947
|
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve
import matplotlib.pyplot as plt
from pathlib import Path
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import RobustScaler
class ClassifierComparison:
def __init__(self, dataset_path):
self.dataset_path = dataset_path
self.data = None
self.data_test = None
self.target_test = None
self.target = None
self.model_lr = None
self.model_knn = None
self.model_mlp = None
def load_data(self):
df = pd.read_csv(self.dataset_path)
df1 = df.copy(deep=True) # making a copy of the dataframe to protect original data
# define the columns to be encoded and scaled
categorical_columns = ['sex', 'exng', 'caa', 'cp', 'fbs', 'restecg', 'slp', 'thall']
continious_columns = ["age", "trtbps", "chol", "thalachh", "oldpeak"]
# encoding the categorical columns
df1 = pd.get_dummies(df1, columns=categorical_columns, drop_first=True)
# %%
# # defining the features and target
X = df1.drop(['output'], axis=1)
y = df1[['output']]
# # instantiating the scaler
scaler = RobustScaler()
        # scaling the continuous features
        X[continious_columns] = scaler.fit_transform(
            X[continious_columns])  # RobustScaler centers on the median and scales by the IQR, which is robust to outliers
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
self.data = X_train
self.target = y_train
self.data_test = X_test
self.target_test = y_test
def train_models(self):
self.model_lr = LogisticRegression()
self.model_lr.fit(self.data, self.target)
self.model_knn = KNeighborsClassifier()
self.model_knn.fit(self.data, self.target)
self.model_mlp = MLPClassifier()
self.model_mlp.fit(self.data.astype(float), self.target)
def predict(self):
lr_predictions = self.model_lr.predict(self.data_test)
knn_predictions = self.model_knn.predict(self.data_test)
mlp_predictions = self.model_mlp.predict(self.data_test)
return lr_predictions, knn_predictions, mlp_predictions
def compare_metrics(self):
lr_predictions, knn_predictions, mlp_predictions = self.predict()
lr_accuracy = accuracy_score(self.target_test, lr_predictions)
knn_accuracy = accuracy_score(self.target_test, knn_predictions)
mlp_accuracy = accuracy_score(self.target_test, mlp_predictions)
print(f"Logistic Regression Accuracy: {lr_accuracy:.4f}")
print(f"KNN Accuracy: {knn_accuracy:.4f}")
print(f"MLP Accuracy: {mlp_accuracy:.4f}")
def plot_roc_auc_curves(self):
lr_probabilities = self.model_lr.predict_proba(self.data_test)[:, 1]
knn_probabilities = self.model_knn.predict_proba(self.data_test)[:, 1]
mlp_probabilities = self.model_mlp.predict_proba(self.data_test)[:, 1]
lr_auc = roc_auc_score(self.target_test, lr_probabilities)
knn_auc = roc_auc_score(self.target_test, knn_probabilities)
mlp_auc = roc_auc_score(self.target_test, mlp_probabilities)
fpr_lr, tpr_lr, _ = roc_curve(self.target_test, lr_probabilities)
fpr_knn, tpr_knn, _ = roc_curve(self.target_test, knn_probabilities)
fpr_mlp, tpr_mlp, _ = roc_curve(self.target_test, mlp_probabilities)
plt.figure(figsize=(6, 3))
plt.plot(fpr_lr, tpr_lr, label=f"Logistic Regression (AUC = {lr_auc:.2f})")
plt.plot(fpr_knn, tpr_knn, label=f"KNN (AUC = {knn_auc:.2f})")
plt.plot(fpr_mlp, tpr_mlp, label=f"MLP (AUC = {mlp_auc:.2f})")
plt.plot([0, 1], [0, 1], linestyle='--', color='black')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curves')
plt.legend()
plt.tight_layout()
plt.show()
# Usage Example
cc = ClassifierComparison(Path('/Users/anmolgorakshakar/Downloads/heart.csv'))
cc.load_data()
cc.train_models()
cc.compare_metrics()
cc.plot_roc_auc_curves()
|
anmol6536/binder_project
|
hw6_comparing_models.py
|
hw6_comparing_models.py
|
py
| 4,387 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42455383816
|
from model.contact import Contact
from model.group import Group
import random
def test_add_contact_to_group(app, db, check_ui):
# checks whether there are contacts available. If not - create one
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="Name for deletion"))
# check whether there are groups available. If not - create one
if len(db.get_group_list()) == 0:
app.group.create(Group(name="azazazaz"))
# check whether there are free contacts (not part of any group)
if len(db.get_contacts_not_in_any_of_groups()) == 0:
app.contact.create(Contact(firstname="Contact not in groups"))
# check whether there are free groups (do not have any contacts inside)
if len(db.get_groups_without_contacts()) == 0:
app.group.create(Group(name="Group without contacts"))
# choose random contact to add
random_contact = random.choice(db.get_contacts_not_in_any_of_groups())
# choose random group for contact addition
random_group = random.choice(db.get_groups_without_contacts())
# add contact to group
app.contact.add_contact_to_group(random_contact.id, random_group.id)
# assert that random_contact is in list of contacts of random_group
assert random_contact in db.get_contact_in_group(random_group)
|
1kpp/python_trainnig
|
test/test_add_contact_to_group.py
|
test_add_contact_to_group.py
|
py
| 1,312 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21881567037
|
# app.py
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
import pandas as pd
import uvicorn
app = FastAPI()
# This middleware is required in order to accept requests from other domains such as a React app running on 'localhost:3000'
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
df = pd.read_csv('./sentiment140/training.1600000.processed.noemoticon.csv',
names=['score', 'id', 'date', 'col4', 'author', 'tweet'])
# Load your trained model
model = MultinomialNB()
vectorizer = CountVectorizer()
# Assuming df is your DataFrame from which you trained the model
X_train_vectorized = vectorizer.fit_transform(df['tweet'])
y_train = df['score'].astype(str)
model.fit(X_train_vectorized, y_train)
class SentimentRequest(BaseModel):
text: str
class SentimentResponse(BaseModel):
prediction: int
@app.post("/predict_sentiment", response_model=SentimentResponse)
def predict_sentiment(request: SentimentRequest):
global df # declare df as global so we can use it in this endpoint
text = request.text
# Vectorize the input text
text_vectorized = vectorizer.transform([text])
# Make prediction
prediction = model.predict(text_vectorized)[0]
# Append this prediction to the model
new_entry = pd.DataFrame({'score': [prediction], 'tweet': [text]})
df = pd.concat([df, new_entry], ignore_index=True)
    df.to_csv('./sentiment140/training.1600000.processed.noemoticon.csv', index=False, header=False)  # the file is read without a header row above, so do not write one
return {"prediction": prediction}
@app.get("/get_last_5")
def get_last_5():
global df
last_5_entries = df.tail(5)
last_5_entries_trimmed = last_5_entries[['score', 'tweet']].to_dict(orient='records')
return last_5_entries_trimmed
if __name__ == "__main__":
uvicorn.run("app:app", port=8000, reload=True)
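# A minimal client sketch (assumes the server is running locally on port 8000 and that
# the `requests` package is available; the example text and output are illustrative only):
#   import requests
#   r = requests.post("http://localhost:8000/predict_sentiment", json={"text": "great movie"})
#   print(r.json())  # e.g. {"prediction": 4}
#   print(requests.get("http://localhost:8000/get_last_5").json())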
|
pnavab/tweet-sentiment-NLP
|
app.py
|
app.py
|
py
| 2,079 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43391996015
|
import os;
from random import shuffle
def create_data_sets():
imagesPath = "ImagesTrain_Sorted/"
textFilesPath = ""
classifiers = {}
classifierIndex = 0
images_per_folder = []
for folderName in os.listdir(imagesPath):
classifiers.update({folderName: classifierIndex})
classifierIndex += 1
images_per_folder.append(len(os.listdir(imagesPath+"//"+folderName)))
images_per_class = min(images_per_folder)
percent_testing = .2
testingNum = percent_testing * images_per_class
trainingList = []
testingList = []
for folderName in os.listdir(imagesPath):
        currPath = imagesPath + "/%s" % folderName
imageslist = os.listdir(currPath)
shuffle(imageslist)
for x in range(0, images_per_class):
entry = imagesPath + folderName + "/" + imageslist[x] + "," + str(classifiers[folderName])
if (x < testingNum):
testingList.append(entry)
else:
trainingList.append(entry)
shuffle(trainingList)
shuffle(testingList)
# Creates/overwrites existing text files for training and testing
training = open(textFilesPath + "train.txt", "w+")
testing = open(textFilesPath + "test.txt", "w+")
# writes to training and testing text files
for entry in trainingList:
training.write(entry + "\n")
for entry in testingList:
testing.write(entry + "\n")
# Closes the text files
training.close()
testing.close()
|
lealex262/Machine-Learned-Image-Classification-
|
createdatasets.py
|
createdatasets.py
|
py
| 1,519 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21312898418
|
import math
class Point:
"""
Represents a point in 2-D geometric space
"""
def __init__(self, x=0, y=0):
"""
Initializes the position of a new point.
If they are not specified, the point defaults to the origin
:param x: x coordinate
:param y: y coordinate
"""
self.x = x
self.y = y
def reset(self):
"""
Reset the point to the origin in 2D space
:return: nothing
"""
self.move(0, 0)
def move(self, x, y):
"""
Move a point to a new location in 2D space
:param x: x coordinate
:param y: y coordinate
:return: nothing
"""
self.x = x
self.y = y
def calculate_distance(self, other_point):
"""
Calculate distance between this point and parameter point
:other_point: second point
:return: the distance as a float
"""
return math.sqrt((other_point.x - self.x)**2 + (other_point.y - self.y)**2)
def main():
p1 = Point()
print(p1.x, p1.y)
p2 = Point(5, 8)
print(p2.x, p2.y)
p2.reset()
print(p2.x, p2.y)
p2.move(9, 10)
print(p2.x, p2.y)
print(p1.calculate_distance(p2))
assert(p2.calculate_distance(p1) == p1.calculate_distance(p2))
if __name__ == "__main__":
main()
exit(0)
|
matthewkirk203/intermediatePython
|
python/day1/testLiveTemplate.py
|
testLiveTemplate.py
|
py
| 1,363 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2794147606
|
# TRAINING OF A 2D CONVOLUTIONAL NETWORK - HSI CLASSIFICATION
# PCA is used for dimensionality reduction and extraction of spectral features. An sxs window of the
# original image is fed into the convolutional network to generate spatial features from the convolution.
# A multinomial logistic regression classifier is used as the output layer. All layers use supervised training.
import warnings
warnings.filterwarnings('ignore')
from package.cargarHsi import CargarHsi
from package.prepararDatos import PrepararDatos
from package.PCA import princiapalComponentAnalysis
from package.MorphologicalProfiles import morphologicalProfiles
from package.dataLogger import DataLogger
from keras import layers
from keras import models
from keras import regularizers
from keras import backend as K
import matplotlib.pyplot as plt
import numpy as np
import os
# LOAD HSI IMAGE AND GROUND TRUTH
numTest = 10
dataSet = 'Urban'
test = 'pcaCNN2D' # pcaCNN2D eapCNN2D
fe_eap = False # false for PCA, true for EAP
ventana = 9  # 2D processing window size
data = CargarHsi(dataSet)
imagen = data.imagen
groundTruth = data.groundTruth
# CREATE THE DATA LOGGER FILE
logger = DataLogger(fileName = dataSet, folder = test, save = True)
# PRINCIPAL COMPONENT ANALYSIS
pca = princiapalComponentAnalysis()
#imagenFE = pca.pca_calculate(imagen, varianza=0.95)
imagenFE = pca.pca_calculate(imagen, componentes=18)
print(imagenFE.shape)
# ESTIMATION OF EXTENDED ATTRIBUTE PROFILES
if fe_eap:
mp = morphologicalProfiles()
imagenFE = mp.EAP(imagenFE, num_thresholds=6) #####################
print(imagenFE.shape)
OA = 0
vectOA = np.zeros(numTest)
for i in range(0, numTest):
    # PREPARE DATA FOR TRAINING
preparar = PrepararDatos(imagenFE, groundTruth, False)
datosEntrenamiento, etiquetasEntrenamiento, datosValidacion, etiquetasValidacion = preparar.extraerDatos2D(50,30,ventana)
datosPrueba, etiquetasPrueba = preparar.extraerDatosPrueba2D(ventana)
    # DEFINITION OF THE CONVOLUTIONAL NETWORK
model = models.Sequential()
model.add(layers.Conv2D(48, (5, 5), kernel_regularizer=regularizers.l2(0.001),activation='relu', input_shape=(datosEntrenamiento.shape[1],datosEntrenamiento.shape[2],datosEntrenamiento.shape[3])))
#model.add(layers.MaxPooling2D((2,2), data_format='channels_last', strides=(1,1), padding='same'))
model.add(layers.Conv2D(96, (3, 3), kernel_regularizer=regularizers.l2(0.001),activation='relu'))
#model.add(layers.MaxPooling2D((2,2), data_format='channels_last', strides=(1,1), padding='same'))
model.add(layers.Conv2D(96, (3, 3), kernel_regularizer=regularizers.l2(0.001),activation='relu'))
#model.add(layers.MaxPooling2D((2,2), data_format='channels_last', strides=(1,1), padding='same'))
    # FULLY CONNECTED LAYER
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1024, kernel_regularizer=regularizers.l2(0.001), activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1024, kernel_regularizer=regularizers.l2(0.001), activation='relu'))
    # ADD AN MLR CLASSIFIER ON TOP OF THE CONVNET
model.add(layers.Dense(groundTruth.max()+1, activation='softmax'))
print(model.summary())
    # TRAINING OF THE CONVOLUTIONAL NETWORK
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(datosEntrenamiento,etiquetasEntrenamiento,epochs=35,batch_size=512,validation_data=(datosValidacion, etiquetasValidacion))
    # EVALUATE THE MODEL
test_loss, test_acc = model.evaluate(datosPrueba, etiquetasPrueba)
vectOA[i] = test_acc
OA = OA+test_acc
    # LOG THE TRAINING DATA
logger.savedataTrain(history)
    # SAVE THE CONVOLUTIONAL NETWORK MODEL
model.save(os.path.join(logger.path,test+str(i)+'.h5'))
# GENERATE THE FINAL CLASSIFICATION MAP
print('dataOA = '+ str(vectOA))
print('OA = '+ str(OA/numTest))
datosSalida = model.predict(datosPrueba)
datosSalida = preparar.predictionToImage(datosSalida)
# PLOTS
data.graficarHsi_VS(groundTruth, datosSalida)
data.graficar_history(history)
K.clear_session()
logger.close()
|
davidruizhidalgo/unsupervisedRemoteSensing
|
2_Redes Supervisadas/hsi_CNN2D.py
|
hsi_CNN2D.py
|
py
| 4,213 |
python
|
es
|
code
| 13 |
github-code
|
6
|
19399678889
|
class Solution:
def backspaceCompare(self, S: str, T: str) -> bool:
s = self.simulate(S)
t = self.simulate(T)
# print(s, t)
return s == t
def simulate(self,S):
arr = list(S)
result = []
for each in arr:
if each == '#':
if len(result):
result.pop()
else:
result.append(each)
return result
S = "ab#c"
T = "ad#c"
s = Solution().backspaceCompare(S, T)
print(s)
|
Yigang0622/LeetCode
|
backspaceCompare.py
|
backspaceCompare.py
|
py
| 507 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27259910110
|
"""We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""617. Merge Two Binary Trees
"""
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class Solution:
def mergeTrees(self, t1, t2):
if not t1:
return t2
if not t2:
return t1
t1.val += t2.val
t1.left = self.mergeTrees(t1.left, t2.left)
t1.right = self.mergeTrees(t1.right, t2.right)
return t1
|
asperaa/back_to_grind
|
Trees/merge_trees.py
|
merge_trees.py
|
py
| 537 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6018276646
|
# https://pypi.org/project/emoji/
from PIL import Image, ImageDraw, ImageFont
import emoji
print(emoji.demojize('Python 👍'))
print(emoji.emojize("Python :thumbs_up:"))
# Create a blank RGBA-mode image
img = Image.new('RGBA', (200, 200), color='white')
# Get the Unicode string for the emoji character
emoji_unicode = emoji.emojize(':thumbs_up:')
# Get the drawing object and fonts
draw = ImageDraw.Draw(img)
font_path = r'H:\Snippets\Program-Learning\Python\modules\utils\SourceHanSansCN-Medium.otf'
emoji_font_path = r'H:\Snippets\Program-Learning\Python\modules\utils\SEGUIEMJ.TTF'
font = ImageFont.truetype(font_path, 24, encoding='unic')
emoji_font = ImageFont.truetype(emoji_font_path, 24)
# Create the image and drawing object
image = Image.new("RGB", (200, 200), (255, 255, 255))
draw = ImageDraw.Draw(image)
# Draw the text
text = "Hello, 世界 👍"
x, y = 50, 50
for char in text:
    # If the character is an emoji
if char.encode('unicode_escape').decode('utf-8').startswith('\\U'):
draw.text((x, y+8), char, font=emoji_font,
fill=None, embedded_color=True)
size = draw.textlength(char, font=emoji_font)
else:
draw.text((x, y), char, font=font, fill=(0, 0, 0))
size = draw.textlength(char, font=font)
x += size
# Display the image
# image.show()
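# Strip the U+FE0F variation selector so that '❤' and '❤️' normalize to the same base character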
original_list = ['❤❤️']
new_list = ["".join([char for char in string if char.encode(
'unicode_escape').decode('utf-8') != '\\ufe0f']) for string in original_list]
print(new_list)
|
Yuelioi/Program-Learning
|
Python/modules/utils/_emoji.py
|
_emoji.py
|
py
| 1,542 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42411367389
|
# -*- coding: utf-8 -*-
#
# File: BPDProgramable.py
#
# Copyright (c) 2011 by Conselleria de Infraestructuras y Transporte de la
# Generalidad Valenciana
#
# GNU General Public License (GPL)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
#
__author__ = """acv <[email protected]>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from Products.gvSIGbpd.config import *
##code-section module-header #fill in your manual code here
##/code-section module-header
schema = Schema((
ComputedField(
name='sumarioProgramas',
widget=ComputedField._properties['widget'](
label="Resumenes de Programas",
label2="Programs Summary",
description="Un resumen de los tipos del programas y el codigo fuente de los programas especificados en este elemento.",
description2="A summary of the programs types and source codes of the programs specified for this element.",
label_msgid='gvSIGbpd_BPDProgramable_attr_sumarioProgramas_label',
description_msgid='gvSIGbpd_BPDProgramable_attr_sumarioProgramas_help',
i18n_domain='gvSIGbpd',
),
exclude_from_values_paragraph="True",
description="Un resumen de los tipos del programas y el codigo fuente de los programas especificados en este elemento.",
duplicates="0",
label2="Programs Summary",
ea_localid="627",
derived="0",
precision=0,
collection="false",
styleex="volatile=0;IsLiteral=0;",
description2="A summary of the programs types and source codes of the programs specified for this element.",
ea_guid="{B50FFB52-5276-4846-931D-747DFFAB639C}",
exclude_from_values_form="True",
scale="0",
label="Resumenes de Programas",
length="0",
containment="Not Specified",
position="2",
owner_class_name="BPDProgramable",
expression="'; '.join( [ '%s:%s' %(( aPrg.getTipoPrograma() or ''), ( aPrg.getFuentePrograma() or ''))[:64] for aPrg in context.getProgramas()])",
computed_types="string",
exclude_from_copyconfig="True",
exclude_from_exportconfig="True"
),
ComputedField(
name='programas',
widget=ComputedWidget(
label="Programas",
label2="Programs",
description="Programas implementando el comportamiento de este elemento.",
description2="Programs implementing the behavior of this element.",
label_msgid='gvSIGbpd_BPDProgramable_contents_programas_label',
description_msgid='gvSIGbpd_BPDProgramable_contents_programas_help',
i18n_domain='gvSIGbpd',
),
contains_collections=False,
label2='Programs',
additional_columns=['tipoPrograma', 'fuentePrograma'],
label='Programas',
represents_aggregation=True,
description2='Programs implementing the behavior of this element.',
multiValued=1,
owner_class_name="BPDProgramable",
expression="context.objectValues(['BPDPrograma'])",
computed_types=['BPDPrograma'],
non_framework_elements=False,
description='Programas implementando el comportamiento de este elemento.'
),
),
)
##code-section after-local-schema #fill in your manual code here
##/code-section after-local-schema
BPDProgramable_schema = schema.copy()
##code-section after-schema #fill in your manual code here
##/code-section after-schema
class BPDProgramable:
"""
"""
security = ClassSecurityInfo()
allowed_content_types = ['BPDPrograma']
_at_rename_after_creation = True
schema = BPDProgramable_schema
##code-section class-header #fill in your manual code here
##/code-section class-header
# Methods
# end of class BPDProgramable
##code-section module-footer #fill in your manual code here
##/code-section module-footer
|
carrascoMDD/gvSIG-bpd
|
gvSIGbpd/BPDProgramable.py
|
BPDProgramable.py
|
py
| 4,774 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25055000504
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib, urllib2
from urllib2 import HTTPError
class HttpRequest(object):
    '''
    Issue HTTP calls
    '''
def param_encode(self, params):
return urllib.urlencode(params)
def request(self, request_url, request_type="GET", params=None, headers={}, data=None):
opener = urllib2.build_opener(urllib2.HTTPHandler)
if params:
request_url = "{0}?{1}".format(request_url, self.param_encode(params))
req = urllib2.Request(request_url, data, headers)
req.get_method = lambda: request_type
try:
response = opener.open(req)
except HTTPError as hpe:
# fail
return HttpResponse(hpe)
return HttpResponse(response)
class HttpResponse(object):
__CONTENT__TYPE__ = "Content-Type"
def __init__(self, info):
self.info = info
self.headers = info.headers
self.code = info.code
self.msg = info.msg
self.body = info.read()
self.isok = True if not isinstance(info, HTTPError) else False
if __name__ == "__main__":
http = HttpRequest()
response = http.request("http://localhost:11000/oozie")
print(response.headers["Content-Type"])
|
developer-sdk/oozie-webservice-api
|
oozie-webservice-api/oozie/httputil2.py
|
httputil2.py
|
py
| 1,335 |
python
|
en
|
code
| 1 |
github-code
|
6
|
2714558577
|
# Inheritance
# : used so that classes can eliminate duplicated code
#   and be easier to maintain.
# Parent class
class Monster:
def __init__(self,name, health, attack):
self.name = name
self.health = health
self.attack = attack
def move(self):
print(f"[{self.name}]지상에서 이동하기")
# Child classes
class Wolf(Monster):
pass
class Shark(Monster):
def move(self):
print(f"[{self.name}]헤엄치기") # 메소드 오버라이딩
class Dragon(Monster):
def move(self):
print(f"[{self.name}]날기")
wolf = Wolf("울프",1500, 200)
wolf.move()
Shark = Shark("울프",1500, 200)
Shark.move()
Dragon = Dragon("울프",1500, 200)
Dragon.move()
|
Monsangter/pythonweb
|
python_basic/myvenv/chapter8/04.상속.py
|
04.상속.py
|
py
| 731 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
12746754821
|
import requests
from bs4 import BeautifulSoup as bs
import smtplib
URL = "https://www.amazon.in/9500-15-6-inch-i7-10750H-NVIDIA1650-Graphics/dp/B08BZPRWR5/ref=sr_1_4?dchild=1&keywords=Dell+XPS+15&qid=1602254565&sr=8-4"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36",
}
def check_price():
"""Check Price of Product"""
page = requests.get(URL, headers=headers)
soup = bs(page.content, 'html.parser')
title = soup.find(id="productTitle").get_text()
price = soup.find(id="priceblock_ourprice").get_text()
price.replace("₹", "")
price.replace(",", "")
price.replace(" ", "")
price.replace("\\xa;", "")
converted_price = float(price[0:5])
if (converted_price < 2000000.00):
send_mail()
print(converted_price)
def send_mail():
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login("[email protected]", "Baahubali")
subject = "Price went down for DELL XPS 15"
body = ("Check it out: https://www.amazon.de/Dell-Generation-i7-10750H-N18P-G62-DDR4-2933MHz/dp/B088TWQ1V8/ref=sr_1_1?__mk_de_DE=%C3%85M%C3%85%C5%BD%C3%95%C3%91&crid=1QODNEAOK4F7R&dchild=1&keywords=dell+xps+15&qid=1602067797&quartzVehicle=93-295&replacementKeywords=dell+xps&sprefix=Dell+XPS+%2Caps%2C281&sr=8-1")
msg = f"Subject: {subject} \n\n {body}"
server.sendmail(
"[email protected]",
"[email protected]",
msg
)
print("Email sent")
server.quit()
check_price()
|
Programmer-X31/PythonProjects
|
Project Amazon Scraper/main.py
|
main.py
|
py
| 1,679 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39485139620
|
#!/usr/bin/env python3
"""lc3_achi.py -- achivements module"""
import time
import dataset
from flask import session
lc3_achivements = [{'id': 0, 'hidden': False, 'title': 'Sleepless', 'desc': 'Submit a correct flag at night'},
{'id': 3, 'hidden': False, 'title': 'CTF Initiate', 'desc': 'Solve one problem'}
]
def check_and_set(dbfile, id):
db = dataset.connect('sqlite:///ctf.db')
achis = db.query('''select a.achi_id from achivements a
where a.user_id = :user_id''', user_id=session['user_id'])
achi = [a['achi_id'] for a in list(achis)]
if id in achi:
db.executable.close()
return False
else:
new_achi = dict(achi_id=id, user_id=session['user_id'])
db['achivements'].insert(new_achi)
db.executable.close()
return True
def chkachi(dbfile, action, **kw):
new_achi = False
return new_achi
|
Himanshukr000/CTF-DOCKERS
|
lc3ctf/examples/lc3achi/lc3achi.py
|
lc3achi.py
|
py
| 922 |
python
|
en
|
code
| 25 |
github-code
|
6
|
24199870907
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 12 15:21:43 2019
@author: Administrator
"""
class Solution:
def uniquePathsWithObstacles(self, obstacleGrid):
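        # Bottom-up DP: memo[i][j] counts the right/down paths from cell (i, j) to the
        # bottom-right corner, and is 0 wherever there is an obstacle.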
m = len(obstacleGrid)
if not m:
return
n = len(obstacleGrid[0])
memo = [[0 for _ in range(n)] for _ in range(m)]
if obstacleGrid[-1][-1] == 0:
memo[-1][-1] = 1
else:
return 0
for i in range(n - 2, -1, -1):
if obstacleGrid[-1][i] == 1:
memo[-1][i] = 0
else:
memo[-1][i] = memo[-1][i + 1]
for i in range(m - 2, -1, -1):
if obstacleGrid[i][-1] == 1:
memo[i][-1] = 0
else:
memo[i][-1] = memo[i + 1][-1]
for i in range(m - 2, -1, -1):
for j in range(n - 2, -1, -1):
if obstacleGrid[i][j] == 1:
memo[i][j] = 0
else:
memo[i][j] = memo[i + 1][j] + memo[i][j + 1]
return memo[0][0]
if __name__ == '__main__':
obstacleGrid = [[0,0,0],[0,1,0],[0,0,0]]
res = Solution().uniquePathsWithObstacles(obstacleGrid)
|
AiZhanghan/Leetcode
|
code/63. Unique Paths II.py
|
63. Unique Paths II.py
|
py
| 1,255 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73590131389
|
"""__author__ = 余婷"""
# 1. Text-file related operations
def get_text_file_content(file_path):
    """
    Get the contents of a text file
    :param file_path: path to the file
    :return: the file contents, or None if the file does not exist
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()
    except FileNotFoundError:
        print('Error: the file does not exist!!!')
        return None
def write_text_file(content, file_path):
    """
    Write data to the specified text file
    :param content: the content to write (must be a string)
    :param file_path: path to the file
    :return: whether the write succeeded
    """
    try:
        # text mode ('w') is required here: binary mode ('wb') rejects the encoding argument
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(content)
        return True
    except TypeError:
        print('Error: the content must be a string!!!')
        return False
if __name__ == '__main__':
get_text_file_content('./aa.txt')
write_text_file(str(True), './aa.txt')
|
gilgameshzzz/learn
|
day10Python_pygame/day10-管理系统/system/fileManager.py
|
fileManager.py
|
py
| 960 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
4041441314
|
__author__ = 'yueli'
import numpy as np
import matplotlib.pyplot as plt
from config.config import *
mrList = np.linspace(1, 13, 13)
negativeList = [-1, -1, -10, -10, -1, -1, -1, -10 ,-1, -1, -1, -10, -10]
noMapReplyList = np.linspace(0, 0, 13)
rlocSet1 = [-10, 1, 1, 1, -10, -10, 1, 1, 1, -10, 1, 1, 1]
rlocSet2 = [-10, 2, 2, 2, 2, 2, 2, 2, 2, -10, 2, 2, 2]
rlocSet3 = [-10, -10, 3, 3, -10, -10, -10, 3, -10, -10, -10, 3, 3]
plt.xlim(0.5, 13.5)
plt.ylim(-1.5, 3.5)
plt.scatter(mrList,negativeList, color = 'blue')
plt.scatter(mrList,noMapReplyList, color = 'yellow')
plt.scatter(mrList,rlocSet1, color = 'purple')
plt.scatter(mrList,rlocSet2, color = 'red')
plt.scatter(mrList,rlocSet3, color = 'green')
plt.xlabel("13 different Map Resolvers")
plt.ylabel("Responses from MRs")
plt.title("Responses from 13 MRs for EID-153.16.49.112 at liege(by MR)")
plt.xticks(mrList, ['MR1', 'MR2', 'MR3', 'MR4', 'MR5', 'MR6', 'MR7', 'MR8', 'MR9', 'MR10', 'MR11', 'MR12', 'MR13', 'MR14', 'MR15'])
plt.yticks([-1, 0, 1, 2, 3], ['Negative Reply', 'No Map Reply', '82.121.231.67', '192.168.1.66', '132.227.85.231'])
# plt.savefig(
# os.path.join(PLOT_DIR, 'Plot_variable_MR', 'Plot_variable_MR.eps'),
# dpi=300,
# transparent=True
# )
plt.show()
|
hansomesong/TracesAnalyzer
|
Plot/Plot_variable_MR/Plot_variable_MR.py
|
Plot_variable_MR.py
|
py
| 1,248 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35031212514
|
import io
import webbrowser
video_links_dict = {}
video_links = open("C:\\users\lcrum\documents\mypythonprograms\musicvideolinks.txt", "r")
for line in video_links:
    # each line holds "<title> <url>"; split on the first space
    varKey, varExcess, varVal = line.partition(' ')
    video_links_dict[varKey] = varVal.strip()
video_links.close()
def song_selection():
userSelection = input("Please enter the title from the list you want to play: ")
song_url = video_links_dict[userSelection]
webbrowser.open(song_url)
def song_list():
for titles in video_links_dict:
print(titles)
print("Welcome to the kids music videos.Here is a selection of movie titles you can choose from ...")
song_list()
song_selection()
user_response = input("Do you wish to select another video?: Y or N")
while user_response == 'Y':
    song_list()
    song_selection()
    # ask again so the loop can actually end
    user_response = input("Do you wish to select another video?: Y or N")
print("Thanks for using our service. Bye for now")
|
linseycurrie/FilmMusicVideos
|
MusicVideos.py
|
MusicVideos.py
|
py
| 893 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18266500320
|
"""This is Slate's Linear Algebra Compiler. This module is
responsible for generating C++ kernel functions representing
symbolic linear algebra expressions written in Slate.
This linear algebra compiler uses both Firedrake's form compiler,
the Two-Stage Form Compiler (TSFC) and COFFEE's kernel abstract
syntax tree (AST) optimizer. TSFC provides this compiler with
appropriate kernel functions (in C) for evaluating integral
expressions (finite element variational forms written in UFL).
COFFEE's AST base helps with the construction of code blocks
throughout the kernel returned by: `compile_expression`.
The Eigen C++ library (http://eigen.tuxfamily.org/) is required, as
all low-level numerical linear algebra operations are performed using
this templated function library.
"""
from coffee import base as ast
from firedrake.constant import Constant
from firedrake.tsfc_interface import SplitKernel, KernelInfo
from firedrake.slate.slac.kernel_builder import LocalKernelBuilder
from firedrake import op2
from itertools import chain
from pyop2.utils import get_petsc_dir
from pyop2.datatypes import as_cstr
from tsfc.parameters import SCALAR_TYPE
import firedrake.slate.slate as slate
import numpy as np
__all__ = ['compile_expression']
PETSC_DIR = get_petsc_dir()
cell_to_facets_dtype = np.dtype(np.int8)
def compile_expression(slate_expr, tsfc_parameters=None):
"""Takes a Slate expression `slate_expr` and returns the appropriate
:class:`firedrake.op2.Kernel` object representing the Slate expression.
:arg slate_expr: a :class:'TensorBase' expression.
:arg tsfc_parameters: an optional `dict` of form compiler parameters to
be passed onto TSFC during the compilation of
ufl forms.
Returns: A `tuple` containing a `SplitKernel(idx, kinfo)`
"""
if not isinstance(slate_expr, slate.TensorBase):
raise ValueError("Expecting a `TensorBase` object, not %s" % type(slate_expr))
# TODO: Get PyOP2 to write into mixed dats
if slate_expr.is_mixed:
raise NotImplementedError("Compiling mixed slate expressions")
if len(slate_expr.ufl_domains()) > 1:
raise NotImplementedError("Multiple domains not implemented.")
# If the expression has already been symbolically compiled, then
# simply reuse the produced kernel.
if slate_expr._metakernel_cache is not None:
return slate_expr._metakernel_cache
# Create a builder for the Slate expression
builder = LocalKernelBuilder(expression=slate_expr,
tsfc_parameters=tsfc_parameters)
# Keep track of declared temporaries
declared_temps = {}
statements = []
# Declare terminal tensor temporaries
terminal_declarations = terminal_temporaries(builder, declared_temps)
statements.extend(terminal_declarations)
# Generate assembly calls for tensor assembly
subkernel_calls = tensor_assembly_calls(builder)
statements.extend(subkernel_calls)
# Create coefficient temporaries if necessary
if builder.action_coefficients:
coefficient_temps = coefficient_temporaries(builder, declared_temps)
statements.extend(coefficient_temps)
# Create auxiliary temporaries if necessary
if builder.aux_exprs:
aux_temps = auxiliary_temporaries(builder, declared_temps)
statements.extend(aux_temps)
# Generate the kernel information with complete AST
kinfo = generate_kernel_ast(builder, statements, declared_temps)
# Cache the resulting kernel
idx = tuple([0]*slate_expr.rank)
kernel = (SplitKernel(idx, kinfo),)
slate_expr._metakernel_cache = kernel
return kernel
def generate_kernel_ast(builder, statements, declared_temps):
"""Glues together the complete AST for the Slate expression
contained in the :class:`LocalKernelBuilder`.
:arg builder: The :class:`LocalKernelBuilder` containing
all relevant expression information.
:arg statements: A list of COFFEE objects containing all
assembly calls and temporary declarations.
:arg declared_temps: A `dict` containing all previously
declared temporaries.
Return: A `KernelInfo` object describing the complete AST.
"""
slate_expr = builder.expression
if slate_expr.rank == 0:
# Scalars are treated as 1x1 MatrixBase objects
shape = (1,)
else:
shape = slate_expr.shape
# Now we create the result statement by declaring its eigen type and
# using Eigen::Map to move between Eigen and C data structs.
statements.append(ast.FlatBlock("/* Map eigen tensor into C struct */\n"))
result_sym = ast.Symbol("T%d" % len(declared_temps))
result_data_sym = ast.Symbol("A%d" % len(declared_temps))
result_type = "Eigen::Map<%s >" % eigen_matrixbase_type(shape)
result = ast.Decl(SCALAR_TYPE, ast.Symbol(result_data_sym, shape))
result_statement = ast.FlatBlock("%s %s((%s *)%s);\n" % (result_type,
result_sym,
SCALAR_TYPE,
result_data_sym))
statements.append(result_statement)
# Generate the complete c++ string performing the linear algebra operations
# on Eigen matrices/vectors
statements.append(ast.FlatBlock("/* Linear algebra expression */\n"))
cpp_string = ast.FlatBlock(metaphrase_slate_to_cpp(slate_expr,
declared_temps))
statements.append(ast.Incr(result_sym, cpp_string))
# Generate arguments for the macro kernel
args = [result, ast.Decl("%s **" % SCALAR_TYPE, builder.coord_sym)]
# Orientation information
if builder.oriented:
args.append(ast.Decl("int **", builder.cell_orientations_sym))
# Coefficient information
expr_coeffs = slate_expr.coefficients()
for c in expr_coeffs:
if isinstance(c, Constant):
ctype = "%s *" % SCALAR_TYPE
else:
ctype = "%s **" % SCALAR_TYPE
args.extend([ast.Decl(ctype, csym) for csym in builder.coefficient(c)])
# Facet information
if builder.needs_cell_facets:
args.append(ast.Decl("%s *" % as_cstr(cell_to_facets_dtype),
builder.cell_facet_sym))
# NOTE: We need to be careful about the ordering here. Mesh layers are
# added as the final argument to the kernel.
if builder.needs_mesh_layers:
args.append(ast.Decl("int", builder.mesh_layer_sym))
# Macro kernel
macro_kernel_name = "compile_slate"
stmts = ast.Block(statements)
macro_kernel = ast.FunDecl("void", macro_kernel_name, args,
stmts, pred=["static", "inline"])
# Construct the final ast
kernel_ast = ast.Node(builder.templated_subkernels + [macro_kernel])
# Now we wrap up the kernel ast as a PyOP2 kernel and include the
# Eigen header files
include_dirs = builder.include_dirs
include_dirs.extend(["%s/include/eigen3/" % d for d in PETSC_DIR])
op2kernel = op2.Kernel(kernel_ast,
macro_kernel_name,
cpp=True,
include_dirs=include_dirs,
headers=['#include <Eigen/Dense>',
'#define restrict __restrict'])
# Send back a "TSFC-like" SplitKernel object with an
# index and KernelInfo
kinfo = KernelInfo(kernel=op2kernel,
integral_type=builder.integral_type,
oriented=builder.oriented,
subdomain_id="otherwise",
domain_number=0,
coefficient_map=tuple(range(len(expr_coeffs))),
needs_cell_facets=builder.needs_cell_facets,
pass_layer_arg=builder.needs_mesh_layers)
return kinfo
def auxiliary_temporaries(builder, declared_temps):
"""Generates statements for assigning auxiliary temporaries
for nodes in an expression with "high" reference count.
Expressions which require additional temporaries are provided
by the :class:`LocalKernelBuilder`.
:arg builder: The :class:`LocalKernelBuilder` containing
all relevant expression information.
:arg declared_temps: A `dict` containing all previously
declared temporaries. This dictionary
is updated as auxiliary expressions
are assigned temporaries.
"""
statements = [ast.FlatBlock("/* Auxiliary temporaries */\n")]
results = [ast.FlatBlock("/* Assign auxiliary temps */\n")]
for exp in builder.aux_exprs:
if exp not in declared_temps:
t = ast.Symbol("auxT%d" % len(declared_temps))
result = metaphrase_slate_to_cpp(exp, declared_temps)
tensor_type = eigen_matrixbase_type(shape=exp.shape)
statements.append(ast.Decl(tensor_type, t))
statements.append(ast.FlatBlock("%s.setZero();\n" % t))
results.append(ast.Assign(t, result))
declared_temps[exp] = t
statements.extend(results)
return statements
def coefficient_temporaries(builder, declared_temps):
"""Generates coefficient temporary statements for assigning
coefficients to vector temporaries.
:arg builder: The :class:`LocalKernelBuilder` containing
all relevant expression information.
:arg declared_temps: A `dict` keeping track of all declared
temporaries. This dictionary is updated
as coefficients are assigned temporaries.
Action computations require creating coefficient temporaries to
compute the matrix-vector product. The temporaries are created by
inspecting the function space of the coefficient to compute node
and dof extents. The coefficient is then assigned values by looping
over both the node extent and dof extent (double FOR-loop). A double
FOR-loop is needed for each function space (if the function space is
mixed, then a loop will be constructed for each component space).
The general structure of each coefficient loop will be:
FOR (i1=0; i1<node_extent; i1++):
FOR (j1=0; j1<dof_extent; j1++):
wT0[offset + (dof_extent * i1) + j1] = w_0_0[i1][j1]
wT1[offset + (dof_extent * i1) + j1] = w_1_0[i1][j1]
.
.
.
where wT0, wT1, ... are temporaries for coefficients sharing the
same node and dof extents. The offset is computed based on whether
the function space is mixed. The offset is always 0 for non-mixed
coefficients. If the coefficient is mixed, then the offset is
incremented by the total number of nodal unknowns associated with
the component spaces of the mixed space.
"""
statements = [ast.FlatBlock("/* Coefficient temporaries */\n")]
i_sym = ast.Symbol("i1")
j_sym = ast.Symbol("j1")
loops = [ast.FlatBlock("/* Loops for coefficient temps */\n")]
for (nodes, dofs), cinfo_list in builder.action_coefficients.items():
# Collect all coefficients which share the same node/dof extent
assignments = []
for cinfo in cinfo_list:
fs_i = cinfo.space_index
offset = cinfo.offset_index
c_shape = cinfo.shape
actee = cinfo.coefficient
if actee not in declared_temps:
# Declare and initialize coefficient temporary
c_type = eigen_matrixbase_type(shape=c_shape)
t = ast.Symbol("wT%d" % len(declared_temps))
statements.append(ast.Decl(c_type, t))
statements.append(ast.FlatBlock("%s.setZero();\n" % t))
declared_temps[actee] = t
# Assigning coefficient values into temporary
coeff_sym = ast.Symbol(builder.coefficient(actee)[fs_i],
rank=(i_sym, j_sym))
index = ast.Sum(offset,
ast.Sum(ast.Prod(dofs, i_sym), j_sym))
coeff_temp = ast.Symbol(t, rank=(index,))
assignments.append(ast.Assign(coeff_temp, coeff_sym))
# Inner-loop running over dof extent
inner_loop = ast.For(ast.Decl("unsigned int", j_sym, init=0),
ast.Less(j_sym, dofs),
ast.Incr(j_sym, 1),
assignments)
# Outer-loop running over node extent
loop = ast.For(ast.Decl("unsigned int", i_sym, init=0),
ast.Less(i_sym, nodes),
ast.Incr(i_sym, 1),
inner_loop)
loops.append(loop)
statements.extend(loops)
return statements
def tensor_assembly_calls(builder):
"""Generates a block of statements for assembling the local
finite element tensors.
:arg builder: The :class:`LocalKernelBuilder` containing
all relevant expression information and
assembly calls.
"""
statements = [ast.FlatBlock("/* Assemble local tensors */\n")]
# Cell integrals are straightforward. Just splat them out.
statements.extend(builder.assembly_calls["cell"])
if builder.needs_cell_facets:
# The for-loop will have the general structure:
#
# FOR (facet=0; facet<num_facets; facet++):
# IF (facet is interior):
# *interior calls
# ELSE IF (facet is exterior):
# *exterior calls
#
# If only interior (exterior) facets are present,
# then only a single IF-statement checking for interior
# (exterior) facets will be present within the loop. The
# cell facets are labelled `1` for interior, and `0` for
# exterior.
statements.append(ast.FlatBlock("/* Loop over cell facets */\n"))
int_calls = list(chain(*[builder.assembly_calls[it_type]
for it_type in ("interior_facet",
"interior_facet_vert")]))
ext_calls = list(chain(*[builder.assembly_calls[it_type]
for it_type in ("exterior_facet",
"exterior_facet_vert")]))
# Compute the number of facets to loop over
domain = builder.expression.ufl_domain()
if domain.cell_set._extruded:
num_facets = domain.ufl_cell()._cells[0].num_facets()
else:
num_facets = domain.ufl_cell().num_facets()
if_ext = ast.Eq(ast.Symbol(builder.cell_facet_sym,
rank=(builder.it_sym,)), 0)
if_int = ast.Eq(ast.Symbol(builder.cell_facet_sym,
rank=(builder.it_sym,)), 1)
body = []
if ext_calls:
body.append(ast.If(if_ext, (ast.Block(ext_calls,
open_scope=True),)))
if int_calls:
body.append(ast.If(if_int, (ast.Block(int_calls,
open_scope=True),)))
statements.append(ast.For(ast.Decl("unsigned int",
builder.it_sym, init=0),
ast.Less(builder.it_sym, num_facets),
ast.Incr(builder.it_sym, 1), body))
if builder.needs_mesh_layers:
# In the presence of interior horizontal facet calls, an
# IF-ELIF-ELSE block is generated using the mesh levels
# as conditions for which calls are needed:
#
# IF (layer == bottom_layer):
# *bottom calls
# ELSE IF (layer == top_layer):
# *top calls
# ELSE:
# *top calls
# *bottom calls
#
# Any extruded top or bottom calls for extruded facets are
# included within the appropriate mesh-level IF-blocks. If
# no interior horizontal facet calls are present, then
# standard IF-blocks are generated for exterior top/bottom
# facet calls when appropriate:
#
# IF (layer == bottom_layer):
# *bottom calls
#
# IF (layer == top_layer):
# *top calls
#
# The mesh level is an integer provided as a macro kernel
# argument.
# FIXME: No variable layers assumption
statements.append(ast.FlatBlock("/* Mesh levels: */\n"))
num_layers = builder.expression.ufl_domain().topological.layers - 1
int_top = builder.assembly_calls["interior_facet_horiz_top"]
int_btm = builder.assembly_calls["interior_facet_horiz_bottom"]
ext_top = builder.assembly_calls["exterior_facet_top"]
ext_btm = builder.assembly_calls["exterior_facet_bottom"]
bottom = ast.Block(int_top + ext_btm, open_scope=True)
top = ast.Block(int_btm + ext_top, open_scope=True)
rest = ast.Block(int_btm + int_top, open_scope=True)
statements.append(ast.If(ast.Eq(builder.mesh_layer_sym, 0),
(bottom,
ast.If(ast.Eq(builder.mesh_layer_sym,
num_layers - 1),
(top, rest)))))
return statements
def terminal_temporaries(builder, declared_temps):
"""Generates statements for assigning auxiliary temporaries
for nodes in an expression with "high" reference count.
Expressions which require additional temporaries are provided
by the :class:`LocalKernelBuilder`.
:arg builder: The :class:`LocalKernelBuilder` containing
all relevant expression information.
:arg declared_temps: A `dict` keeping track of all declared
temporaries. This dictionary is updated
as terminal tensors are assigned temporaries.
"""
statements = [ast.FlatBlock("/* Declare and initialize */\n")]
for exp in builder.temps:
t = builder.temps[exp]
statements.append(ast.Decl(eigen_matrixbase_type(exp.shape), t))
statements.append(ast.FlatBlock("%s.setZero();\n" % t))
declared_temps[exp] = t
return statements
def parenthesize(arg, prec=None, parent=None):
"""Parenthesizes an expression."""
if prec is None or parent is None or prec >= parent:
return arg
return "(%s)" % arg
def metaphrase_slate_to_cpp(expr, temps, prec=None):
"""Translates a Slate expression into its equivalent representation in
the Eigen C++ syntax.
:arg expr: a :class:`slate.TensorBase` expression.
:arg temps: a `dict` of temporaries which map a given expression to its
corresponding representation as a `coffee.Symbol` object.
    :arg prec: an argument dictating the order of precedence in the linear
               algebra operations. This ensures that parentheses are placed
               appropriately and that the order in which linear algebra
               operations are performed is correct.

    Returns: a string of C/C++ (Eigen) code representing the
             `slate.TensorBase` expr.
"""
# If the tensor is terminal, it has already been declared.
# Coefficients in action expressions will have been declared by now,
# as well as any other nodes with high reference count.
if expr in temps:
return temps[expr].gencode()
elif isinstance(expr, slate.Transpose):
tensor, = expr.operands
return "(%s).transpose()" % metaphrase_slate_to_cpp(tensor, temps)
elif isinstance(expr, slate.Inverse):
tensor, = expr.operands
return "(%s).inverse()" % metaphrase_slate_to_cpp(tensor, temps)
elif isinstance(expr, slate.Negative):
tensor, = expr.operands
result = "-%s" % metaphrase_slate_to_cpp(tensor, temps, expr.prec)
return parenthesize(result, expr.prec, prec)
elif isinstance(expr, (slate.Add, slate.Sub, slate.Mul)):
op = {slate.Add: '+',
slate.Sub: '-',
slate.Mul: '*'}[type(expr)]
A, B = expr.operands
result = "%s %s %s" % (metaphrase_slate_to_cpp(A, temps, expr.prec),
op,
metaphrase_slate_to_cpp(B, temps, expr.prec))
return parenthesize(result, expr.prec, prec)
elif isinstance(expr, slate.Action):
tensor, = expr.operands
c, = expr.actee
result = "(%s) * %s" % (metaphrase_slate_to_cpp(tensor,
temps,
expr.prec), temps[c])
return parenthesize(result, expr.prec, prec)
else:
        raise NotImplementedError("Type %s not supported." % type(expr))
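
# Rough example (tensor and temporary names are hypothetical): if terminal
# tensors A and B are held in the temporaries T0 and T1, then translating the
# Slate expression Transpose(A + B) with metaphrase_slate_to_cpp yields
#
#     (T0 + T1).transpose()
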
def eigen_matrixbase_type(shape):
"""Returns the Eigen::Matrix declaration of the tensor.
    :arg shape: a tuple of integers that denotes the shape of the
                :class:`slate.TensorBase` object.

    Returns: a string giving the declaration of the `slate.TensorBase`
             object in the appropriate Eigen C++ template library syntax.
"""
if len(shape) == 0:
rows = 1
cols = 1
elif len(shape) == 1:
rows = shape[0]
cols = 1
else:
if not len(shape) == 2:
raise NotImplementedError(
"%d-rank tensors are not supported." % len(shape)
)
rows = shape[0]
cols = shape[1]
if cols != 1:
order = ", Eigen::RowMajor"
else:
order = ""
return "Eigen::Matrix<double, %d, %d%s>" % (rows, cols, order)
|
hixio-mh/firedrake
|
firedrake/slate/slac/compiler.py
|
compiler.py
|
py
| 22,060 |
python
|
en
|
code
| null |
github-code
|
6
|
6018330446
|
from playwright.sync_api import sync_playwright
def test_props():
with sync_playwright() as p:
browser = p.chromium.launch(headless=False)
page = browser.new_page()
page.goto("https://image.baidu.com/")
        # Upload a file
file_path = r"C:/Users/yl/Desktop/1.png"
page.locator("input[type=file]").set_input_files(file_path)
        # # Fill in a text field
        # page.fill("#username", "yuellili")
        # # Click
        # page.click("#submit")
        # # Get an iframe element
        # page.frame_locator("iframe")
        # Get the "class" attribute
        # page.get_attribute(selector=".video-title.tit", name="class")
        # Set dropdown (select) options
page.select_option(".province", label="湖南省")
page.select_option(".city", value="长沙市")
def main():
# test_xpath()
# test_css()
# test_playwright_selector()
test_props()
if __name__ == "__main__":
main()
|
Yuelioi/Program-Learning
|
Python/modules/web/Playwright/元素操作.py
|
元素操作.py
|
py
| 987 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30502016476
|
from django.urls import path
from . import views
app_name = 'home'
urlpatterns = [
path('', views.index, name='index'),
path('login/', views.login_view, name='login'),
path('registration/', views.registration, name='registration'),
path('logout/', views.logout_view, name='logout'),
path('profile/', views.profile_view, name='profile'),
path('profile/edit/', views.edit_profile_view, name='profile_edit')
]
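
# Usage sketch (assuming this urlconf is included at the project root):
#
#   from django.urls import reverse
#   reverse('home:index')         # -> '/'
#   reverse('home:profile_edit')  # -> '/profile/edit/'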
|
Arctik124/tekken_league
|
home/urls.py
|
urls.py
|
py
| 434 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21833663632
|
from flask import Flask, render_template, request
from werkzeug.utils import redirect
from scrapper import weather_search
app = Flask("Weather Scrapper")
@app.route("/")
def home():
area = request.args.get('area')
    if area:
        weather_element = weather_search(area)
        print(weather_element)
        return render_template("index.html", searchingBy=area, today_temp=weather_element[0], high_temp=weather_element[3], low_temp=weather_element[2])
    # No area queried yet: return the plain search page instead of falling
    # through with no response (assumes index.html handles missing variables)
    return render_template("index.html")
app.run('0.0.0.0', port=5000)
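
# Request sketch (the area value is illustrative):
#   GET /             -> renders the empty search page
#   GET /?area=Seoul  -> runs weather_search("Seoul") and renders the scraped
#                        temperatures into index.html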
|
Sunggjinn/weather-closet
|
app.py
|
app.py
|
py
| 509 |
python
|
en
|
code
| 0 |
github-code
|
6
|